sched: ems: move active balance to ems
author	Park Bumgyu <bumgyu.park@samsung.com>
Fri, 6 Apr 2018 06:02:28 +0000 (15:02 +0900)
committer	Chungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:35:36 +0000 (17:35 +0900)
Move the active-balance-related code from ehmp.c to core.c of ems.
ehmp.c will be removed.
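
For context, the code being moved exposes two entry points,
exynos_fit_idlest_group() and exynos_need_active_balance(), which are
meant to be consulted from the CFS load-balance path. The fragment below
is only an illustrative, hypothetical call site, assuming the struct
lb_env layout used by kernel/sched/fair.c; the actual hook is not part
of this change and may differ.

    /* Illustration only: hypothetical caller, not part of this patch. */
    static int need_active_balance(struct lb_env *env)
    {
            /* Defer the whole active-balance decision to the EMS policy. */
            return exynos_need_active_balance(env->idle, env->sd,
                                              env->src_cpu, env->dst_cpu);
    }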

Change-Id: I6a01db308b3c889f01da642fa3b96df93dc6432f
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
kernel/sched/ems/core.c
kernel/sched/ems/ehmp.c

index 166530ffbd640ee377ca783427fa56bac9e41857..16c0533580fb800105ff7d2305e4d882732ea981 100644 (file)
@@ -30,6 +30,80 @@ int cpu_util_wake(int cpu, struct task_struct *p)
        return (util >= capacity) ? capacity : util;
 }
 
+static inline int task_fits(struct task_struct *p, long capacity)
+{
+       return capacity * 1024 > task_util(p) * 1248;
+}
+
+struct sched_group *
+exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
+{
+       struct sched_group *group = sd->groups;
+       struct sched_group *fit_group = NULL;
+       unsigned long fit_capacity = ULONG_MAX;
+
+       do {
+               int i;
+
+               /* Skip over this group if it has no CPUs allowed */
+               if (!cpumask_intersects(sched_group_span(group),
+                                       &p->cpus_allowed))
+                       continue;
+
+               for_each_cpu(i, sched_group_span(group)) {
+                       if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
+                               fit_capacity = capacity_of(i);
+                               fit_group = group;
+                       }
+               }
+       } while (group = group->next, group != sd->groups);
+
+       return fit_group;
+}
+
+static inline int
+check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
+{
+       return ((rq->cpu_capacity * sd->imbalance_pct) <
+                               (rq->cpu_capacity_orig * 100));
+}
+
+#define lb_sd_parent(sd) \
+       (sd->parent && sd->parent->groups != sd->parent->groups->next)
+
+int exynos_need_active_balance(enum cpu_idle_type idle, struct sched_domain *sd,
+                                       int src_cpu, int dst_cpu)
+{
+       unsigned int src_imb_pct = lb_sd_parent(sd) ? sd->imbalance_pct : 1;
+       unsigned int dst_imb_pct = lb_sd_parent(sd) ? 100 : 1;
+       unsigned long src_cap = capacity_of(src_cpu);
+       unsigned long dst_cap = capacity_of(dst_cpu);
+       int level = sd->level;
+
+       /* dst_cpu is idle */
+       if ((idle != CPU_NOT_IDLE) &&
+           (cpu_rq(src_cpu)->cfs.h_nr_running == 1)) {
+               if ((check_cpu_capacity(cpu_rq(src_cpu), sd)) &&
+                   (src_cap * sd->imbalance_pct < dst_cap * 100)) {
+                       return 1;
+               }
+
+               /* This is the top-level domain and dst_cpu has bigger capacity than src_cpu */
+               if (!lb_sd_parent(sd) && src_cap < dst_cap)
+                       if (lbt_overutilized(src_cpu, level) || global_boosted())
+                               return 1;
+       }
+
+       if ((src_cap * src_imb_pct < dst_cap * dst_imb_pct) &&
+                       cpu_rq(src_cpu)->cfs.h_nr_running == 1 &&
+                       lbt_overutilized(src_cpu, level) &&
+                       !lbt_overutilized(dst_cpu, level)) {
+               return 1;
+       }
+
+       return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
+}
+
 static int select_proper_cpu(struct task_struct *p)
 {
        return -1;
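
For reference, the task_fits() check added above amounts to a fixed
headroom margin:

    capacity * 1024 > task_util(p) * 1248
    task_util(p) < capacity * 1024 / 1248 ≈ 0.82 * capacity

i.e. a task is considered to fit a CPU only while its utilization stays
below roughly 82% of that CPU's capacity.
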
index 6d91108f6aa483e71a8c6a49a6255a29ca7ffb65..17d7400125bfbb5f493259c7fedd0de799e21f58 100644 (file)
@@ -106,78 +106,6 @@ int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
        return estimate_state;
 }
 
-/**********************************************************************
- * load balance                                                       *
- **********************************************************************/
-#define lb_sd_parent(sd) \
-       (sd->parent && sd->parent->groups != sd->parent->groups->next)
-
-struct sched_group *
-exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
-{
-       struct sched_group *group = sd->groups;
-       struct sched_group *fit_group = NULL;
-       unsigned long fit_capacity = ULONG_MAX;
-
-       do {
-               int i;
-
-               /* Skip over this group if it has no CPUs allowed */
-               if (!cpumask_intersects(sched_group_span(group),
-                                       &p->cpus_allowed))
-                       continue;
-
-               for_each_cpu(i, sched_group_span(group)) {
-                       if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
-                               fit_capacity = capacity_of(i);
-                               fit_group = group;
-                       }
-               }
-       } while (group = group->next, group != sd->groups);
-
-       return fit_group;
-}
-
-static inline int
-check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
-{
-       return ((rq->cpu_capacity * sd->imbalance_pct) <
-                               (rq->cpu_capacity_orig * 100));
-}
-
-int exynos_need_active_balance(enum cpu_idle_type idle, struct sched_domain *sd,
-                                       int src_cpu, int dst_cpu)
-{
-       unsigned int src_imb_pct = lb_sd_parent(sd) ? sd->imbalance_pct : 1;
-       unsigned int dst_imb_pct = lb_sd_parent(sd) ? 100 : 1;
-       unsigned long src_cap = capacity_of(src_cpu);
-       unsigned long dst_cap = capacity_of(dst_cpu);
-       int level = sd->level;
-
-       /* dst_cpu is idle */
-       if ((idle != CPU_NOT_IDLE) &&
-           (cpu_rq(src_cpu)->cfs.h_nr_running == 1)) {
-               if ((check_cpu_capacity(cpu_rq(src_cpu), sd)) &&
-                   (src_cap * sd->imbalance_pct < dst_cap * 100)) {
-                       return 1;
-               }
-
-               /* This domain is top and dst_cpu is bigger than src_cpu*/
-               if (!lb_sd_parent(sd) && src_cap < dst_cap)
-                       if (lbt_overutilized(src_cpu, level) || global_boosted())
-                               return 1;
-       }
-
-       if ((src_cap * src_imb_pct < dst_cap * dst_imb_pct) &&
-                       cpu_rq(src_cpu)->cfs.h_nr_running == 1 &&
-                       lbt_overutilized(src_cpu, level) &&
-                       !lbt_overutilized(dst_cpu, level)) {
-               return 1;
-       }
-
-       return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
-}
-
 /**********************************************************************
  * Global boost                                                       *
  **********************************************************************/