[COMMON] sched: fair: Support improved LBT feature
author: Daeyeong Lee <daeyeong.lee@samsung.com>
Thu, 8 Mar 2018 11:42:13 +0000 (20:42 +0900)
committer: Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:05 +0000 (20:24 +0300)
Change-Id: Ifc2b9e58a43e41128bde7a2c92e897684c25aecf
Signed-off-by: Daeyeong Lee <daeyeong.lee@samsung.com>
kernel/sched/fair.c

index 5605b2ec9e8e284e36157b550296d45faab18cce..8b06736f44f10c38b5f3d90e3c720bf2349c318f 100644 (file)
@@ -5192,12 +5192,19 @@ static void clear_sd_overutilized(struct sched_domain *sd)
 static inline void update_overutilized_status(struct rq *rq)
 {
        struct sched_domain *sd;
+       bool overutilized = false;
 
        rcu_read_lock();
        sd = rcu_dereference(rq->sd);
-       if (sd && !sd_overutilized(sd) &&
-           cpu_overutilized(rq->cpu))
-               set_sd_overutilized(sd);
+       if (sd && !sd_overutilized(sd)) {
+               if (sched_feat(EXYNOS_HMP))
+                       overutilized = lbt_overutilized(rq->cpu, sd->level);
+               else
+                       overutilized = cpu_overutilized(rq->cpu);
+
+               if (overutilized)
+                       set_sd_overutilized(sd);
+       }
        rcu_read_unlock();
 }
 
@@ -9236,6 +9243,8 @@ skip_unlock: __attribute__ ((unused));
        if (!capacity)
                capacity = 1;
 
+       update_lbt_overutil(cpu, capacity);
+
        cpu_rq(cpu)->cpu_capacity = capacity;
        sdg->sgc->capacity = capacity;
        sdg->sgc->min_capacity = capacity;
@@ -9517,11 +9526,20 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                }
 
 
-               if (cpu_overutilized(i)) {
-                       *overutilized = true;
+               if (sched_feat(EXYNOS_MS)) {
+                       if (lbt_overutilized(i, env->sd->level)) {
+                               *overutilized = true;
 
-                       if (rq->misfit_task_load)
-                               *misfit_task = true;
+                               if (rq->misfit_task_load)
+                                       *misfit_task = true;
+                       }
+               } else {
+                       if (cpu_overutilized(i)) {
+                               *overutilized = true;
+
+                               if (rq->misfit_task_load)
+                                       *misfit_task = true;
+                       }
                }
        }