	if (___update_load_avg(now, cpu, &se->avg,
				se->on_rq * scale_load_down(se->load.weight),
				cfs_rq->curr == se, NULL, NULL)) {
-		cfs_se_util_change(&se->avg);
+		if (schedtune_util_est_en(task_of(se)))
+			cfs_se_util_change(&se->avg);
#ifdef UTIL_EST_DEBUG
		/*
	return max(ue.ewma, ue.enqueued);
}
-static inline unsigned long task_util_est(struct task_struct *p)
+unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
	if (!task_sleep)
		return;

+	if (!schedtune_util_est_en(p))
+		return;
+
	/*
	 * If the PELT values haven't changed since enqueue time,
	 * skip the util_est update.
int schedtune_prefer_idle(struct task_struct *tsk);
int schedtune_prefer_perf(struct task_struct *tsk);
+int schedtune_util_est_en(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);
#define schedtune_prefer_idle(tsk) 0
#define schedtune_prefer_perf(tsk) 0
+#define schedtune_util_est_en(tsk) 0
#define schedtune_enqueue_task(task, cpu) do { } while (0)
#define schedtune_dequeue_task(task, cpu) do { } while (0)
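The hunks above declare schedtune_util_est_en() in the schedtune header and stub it to 0 for !CONFIG_SCHED_TUNE builds, but the kernel/sched/tune.c definition is not part of this excerpt. A minimal sketch of what it would likely look like, assuming the stock Android schedtune helpers (struct schedtune, task_schedtune(), schedtune_initialized) and a new per-group util_est_en field, following the same pattern as schedtune_prefer_idle():

/* Sketch only: assumes struct schedtune gains an int util_est_en member. */
int schedtune_util_est_en(struct task_struct *p)
{
	struct schedtune *st;
	int util_est_en;

	/* schedtune cgroups not set up yet: treat util_est as disabled */
	if (unlikely(!schedtune_initialized))
		return 0;

	/* Read the per-cgroup flag under RCU, as the other accessors do */
	rcu_read_lock();
	st = task_schedtune(p);
	util_est_en = st->util_est_en;
	rcu_read_unlock();

	return util_est_en;
}

A complete patch would also have to add the util_est_en field to struct schedtune and expose it as a cgroup attribute (for example a schedtune.util_est_en file) so the flag can actually be toggled per group. Note as well that the fair.c hunk passes task_of(se), which is only meaningful when entity_is_task(se) holds at that call site.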