static inline unsigned long task_util_est(struct task_struct *p)
{
+#ifdef CONFIG_SCHED_WALT
+ if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
+ return (p->ravg.demand /
+ (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
+#endif
return max(task_util(p), _task_util_est(p));
}
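/*
 * Illustrative arithmetic for the WALT path above (the window length is
 * an assumption, not taken from this patch): with SCHED_CAPACITY_SHIFT
 * == 10 and walt_ravg_window == 20000000ns (a 20ms window), the divisor
 * is 20000000 >> 10 == 19531. A task with p->ravg.demand == 10000000ns,
 * i.e. busy for half the window, then yields 10000000 / 19531 ~= 512,
 * half of SCHED_CAPACITY_SCALE (1024), as expected.
 */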
struct sched_group *sg;
};
-/*
- * cpu_util returns the amount of capacity of a CPU that is used by CFS
- * tasks. The unit of the return value must be the one of capacity so we can
- * compare the utilization with the capacity of the CPU that is available for
- * CFS task (ie cpu_capacity).
+/**
+ * cpu_util - amount of capacity of a CPU that is (estimated to be) used by CFS tasks
+ * @cpu: the CPU to get the utilization of
+ *
+ * The unit of the return value must be the same as that of capacity so we
+ * can compare the utilization with the capacity of the CPU that is
+ * available for CFS tasks (ie cpu_capacity).
*
* cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
* recent utilization of currently non-runnable tasks on a CPU. It represents
* current capacity (capacity_curr <= capacity_orig) of the CPU because it is
* the running time on this CPU scaled by capacity_curr.
*
+ * The estimated utilization of a CPU is defined to be the maximum between its
+ * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
+ * currently RUNNABLE on that CPU.
+ * This allows us to properly represent the expected utilization of a CPU
+ * which has just got a big task running after a long sleep period. At the
+ * same time, however, it preserves the benefits of the "blocked utilization"
+ * in describing the potential for other tasks waking up on the same CPU.
+ *
* Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
* higher than capacity_orig because of unfortunate rounding in
* cfs.avg.util_avg or just after migrating tasks and new task wakeups until
* the average stabilizes with the new running time. We need to check that the
* utilization stays within the range of [0..capacity_orig] and cap it if
* necessary. Without utilization capping, a group could be seen as overloaded
* (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
* available capacity. We allow utilization to overshoot capacity_curr (but not
* capacity_orig) as it is useful for predicting the capacity required after
* task migrations (scheduler-driven DVFS).
+ *
+ * Return: the (estimated) utilization for the specified CPU
*/
static inline unsigned long cpu_util(int cpu)
{
struct cfs_rq *cfs_rq;
unsigned int util;

cfs_rq = &cpu_rq(cpu)->cfs;
util = READ_ONCE(cfs_rq->avg.util_avg);
+ if (sched_feat(UTIL_EST))
+ util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
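/*
 * A sketch of why the max() above matters (numbers are illustrative):
 * suppose a big task with util_est.enqueued == 600 wakes on a CPU whose
 * cfs_rq->avg.util_avg has decayed to 150 while the task slept. Plain
 * util_avg would report 150 and invite a bad placement or a too-low
 * OPP; max(150, 600) == 600 reflects what is actually runnable. Right
 * after every task blocks, util_est.enqueued drops to 0 while util_avg
 * still carries the "blocked utilization", preserving that signal for
 * wakeup placement.
 */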
/* Discount task's blocked util from CPU's util */
util -= min_t(unsigned int, util, task_util(p));
+ /*
+ * Covered cases:
+ *
+ * a) if *p is the only task sleeping on this CPU, then:
+ * cpu_util (== task_util) > util_est (== 0)
+ * and thus we return:
+ * cpu_util_wake = (cpu_util - task_util) = 0
+ *
+ * b) if other tasks are SLEEPING on this CPU, which is now exiting
+ * IDLE, then:
+ * cpu_util >= task_util
+ * cpu_util > util_est (== 0)
+ * and thus we discount *p's blocked utilization to return:
+ * cpu_util_wake = (cpu_util - task_util) >= 0
+ *
+ * c) if other tasks are RUNNABLE on that CPU and
+ * util_est > cpu_util
+ * then we use util_est since it returns a more restrictive
+ * estimation of the spare capacity on that CPU, by just
+ * considering the expected utilization of tasks already
+ * runnable on that CPU.
+ *
+ * Cases a) and b) are covered by the above code, while case c) is
+ * covered by the following code when estimated utilization is
+ * enabled.
+ */
+ if (sched_feat(UTIL_EST))
+ util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+
/*
 * Utilization (estimated) can exceed the CPU capacity, thus let's
 * clamp to the maximum CPU capacity to ensure consistency with
 * the cpu_util() call.
 */
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
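/*
 * Worked example for case c) above (illustrative values): with
 * cfs_rq->avg.util_avg == 500, discounting the waking task's
 * task_util(p) == 400 leaves util == 100. If the tasks still RUNNABLE
 * on that CPU have util_est.enqueued == 350, the max() lifts the
 * estimate back to 350, so the spare capacity reported to the wakeup
 * path does not over-promise room for *p.
 */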
if (boost == 0)
return 0;
- util = task_util(task);
+ util = task_util_est(task);
margin = schedtune_margin(util, boost);
return margin;
}
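/*
 * Illustrative margin arithmetic, assuming the usual SchedTune rule
 * margin = (SCHED_CAPACITY_SCALE - util) * boost / 100, which is not
 * shown in this excerpt: with util == task_util_est(task) == 512 and a
 * 10% boost, margin == (1024 - 512) * 10 / 100 == 51. Feeding the
 * estimated rather than the instantaneous utilization keeps the margin
 * stable for a periodic task whose util_avg decays while it sleeps.
 */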
static inline unsigned long
boosted_task_util(struct task_struct *task)
{
- unsigned long util = task_util(task);
+ unsigned long util = task_util_est(task);
long margin = schedtune_task_margin(task);
trace_sched_boost_task(task, util, margin);

return util + margin;
}
* accounting. However, the blocked utilization may be zero.
*/
wake_util = cpu_util_wake(i, p);
- new_util = wake_util + task_util(p);
+ new_util = wake_util + task_util_est(p);
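/*
 * e.g. (illustrative values): wake_util == 300 on candidate CPU i plus
 * task_util_est(p) == 250 gives new_util == 550, which is then weighed
 * against the candidate's capacity. Using the estimate rather than
 * task_util(p) avoids under-predicting a big task whose util_avg has
 * decayed during sleep.
 */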
/*
* Ensure minimum capacity to grant the required boost.
* during energy calculation, but unboosted task
* util for group utilization calculations
*/
- eenv->util_delta = task_util(p);
+ eenv->util_delta = task_util_est(p);
eenv->util_delta_boosted = boosted_task_util(p);
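/*
 * Illustrative split (values and the margin rule are assumptions): a
 * task with task_util_est(p) == 200 under a 10% boost could have
 * boosted_task_util(p) == 200 + (1024 - 200) * 10 / 100 == 282. The
 * boosted figure drives the energy/OPP estimate, while the unboosted
 * one feeds the group utilization accounting, as the comment above
 * notes.
 */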
cpumask_and(&cpumask_possible_cpus, &p->cpus_allowed, cpu_online_mask);
if (cpu_iter == prev_cpu)
continue;
+ /*
+ * Consider only CPUs where the task is expected to
+ * fit without making the CPU overutilized.
+ */
spare = capacity_spare_wake(cpu_iter, p);
-
- if (spare * 1024 < capacity_margin * task_util(p))
+ if (spare * 1024 < capacity_margin * task_util_est(p))
continue;
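/*
 * This check encodes "fits with ~20% headroom", assuming the
 * conventional capacity_margin == 1280 (not shown in this excerpt):
 * spare * 1024 < 1280 * task_util_est(p) rejects CPUs whose spare
 * capacity is below 1.25x the task's estimated utilization, e.g. a task
 * with task_util_est(p) == 200 needs spare >= 250.
 */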
/* Add CPU candidate */
* the heuristics we use there in selecting candidate
* CPUs.
*/
- if (unlikely(!sched_feat(FIND_BEST_TARGET) && !task_util(p)))
+ if (unlikely(!sched_feat(FIND_BEST_TARGET) && !task_util_est(p)))
return false;
if (!sched_feat(EAS_PREFER_IDLE)) {