 static inline void update_overutilized_status(struct rq *rq)
 {
 	struct sched_domain *sd;
+	bool overutilized = false;
 
 	rcu_read_lock();
 	sd = rcu_dereference(rq->sd);
-	if (sd && !sd_overutilized(sd) &&
-	    cpu_overutilized(rq->cpu))
-		set_sd_overutilized(sd);
+	if (sd && !sd_overutilized(sd)) {
+		if (sched_feat(EXYNOS_HMP))
+			overutilized = lbt_overutilized(rq->cpu, sd->level);
+		else
+			overutilized = cpu_overutilized(rq->cpu);
+
+		if (overutilized)
+			set_sd_overutilized(sd);
+	}
 	rcu_read_unlock();
 }
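
With this change, update_overutilized_status() no longer relies only on the flat cpu_overutilized() test: when the EXYNOS_HMP scheduler feature is enabled it defers to lbt_overutilized(), which judges overutilization per sched_domain level. The helper itself is not part of this hunk; the sketch below is one plausible shape for it, assuming a hypothetical per-CPU table of per-level thresholds (struct lbt_overutil, its capacity/ratio fields, and the table layout are illustrative, not taken from the actual Exynos sources).

/* Hypothetical per-CPU table of per-domain-level thresholds. */
struct lbt_overutil {
	unsigned long	capacity;	/* utilization threshold for this level */
	int		ratio;		/* percent of CPU capacity, 0 = end of table */
};
static DEFINE_PER_CPU(struct lbt_overutil *, lbt_overutil);

bool lbt_overutilized(int cpu, int level)
{
	struct lbt_overutil *ou = per_cpu(lbt_overutil, cpu);

	/* No table configured for this CPU: never report overutilization. */
	if (!ou)
		return false;

	/*
	 * A CPU counts as overutilized at a given domain level when its
	 * utilization exceeds the threshold derived for that level.
	 */
	return cpu_util(cpu) > ou[level].capacity;
}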
 	if (!capacity)
 		capacity = 1;
 
+	update_lbt_overutil(cpu, capacity);
+
 	cpu_rq(cpu)->cpu_capacity = capacity;
 	sdg->sgc->capacity = capacity;
 	sdg->sgc->min_capacity = capacity;
 }
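
update_cpu_capacity() now also refreshes the per-level thresholds each time it recomputes a CPU's capacity, so the lbt check tracks thermal and RT-pressure driven capacity changes as load balancing runs. update_lbt_overutil() is likewise outside this hunk; assuming the same hypothetical table as above, it could simply rescale each level's threshold from the freshly computed capacity:

void update_lbt_overutil(int cpu, unsigned long capacity)
{
	struct lbt_overutil *ou = per_cpu(lbt_overutil, cpu);
	int level;

	if (!ou)
		return;

	/* Rescale every level's threshold against the new capacity. */
	for (level = 0; ou[level].ratio; level++)
		ou[level].capacity = capacity * ou[level].ratio / 100;
}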
-		if (cpu_overutilized(i)) {
-			*overutilized = true;
+		if (sched_feat(EXYNOS_MS)) {
+			if (lbt_overutilized(i, env->sd->level)) {
+				*overutilized = true;
 
-			if (rq->misfit_task_load)
-				*misfit_task = true;
+				if (rq->misfit_task_load)
+					*misfit_task = true;
+			}
+		} else {
+			if (cpu_overutilized(i)) {
+				*overutilized = true;
+
+				if (rq->misfit_task_load)
+					*misfit_task = true;
+			}
 		}
 	}
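
Both hunks gate the new behaviour through the scheduler's feature-flag machinery, so for sched_feat(EXYNOS_HMP) and sched_feat(EXYNOS_MS) to compile, matching entries must exist in kernel/sched/features.h, along these lines (the default values here are an assumption, not part of the patch):

SCHED_FEAT(EXYNOS_HMP, true)
SCHED_FEAT(EXYNOS_MS, true)

On a CONFIG_SCHED_DEBUG kernel the flags can then be toggled at runtime through /sys/kernel/debug/sched_features, which makes it straightforward to compare the lbt path against the vanilla cpu_overutilized() behaviour on the same build.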