if (___update_load_avg(now, cpu, &se->avg,
se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL, NULL)) {
- cfs_se_util_change(&se->avg);
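+		/*
+		 * Only flag a util_est update when the task's schedtune
+		 * group has util_est enabled; group entities keep the
+		 * original behaviour.
+		 */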
+		if (!entity_is_task(se) || schedtune_util_est_en(task_of(se)))
+			cfs_se_util_change(&se->avg);
+
return 1;
}
return READ_ONCE(p->se.avg.util_avg);
}
-static inline unsigned long _task_util_est(struct task_struct *p)
+inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
- return max(ue.ewma, ue.enqueued);
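+	/* With util_est disabled for this group, use the plain PELT signal */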
+ return schedtune_util_est_en(p) ? max(ue.ewma, ue.enqueued)
+ : task_util(p);
}
-static inline unsigned long task_util_est(struct task_struct *p)
+inline unsigned long task_util_est(struct task_struct *p)
{
- return max(READ_ONCE(p->se.avg.util_avg), _task_util_est(p));
+	return schedtune_util_est_en(p) ? max(READ_ONCE(p->se.avg.util_avg),
+					      _task_util_est(p))
+					: task_util(p);
}
static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
if (!task_sleep)
return;
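+	/* Skip the update when util_est is disabled for the task's group */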
+ if (!schedtune_util_est_en(p))
+ return;
+
/*
* If the PELT values haven't changed since enqueue time,
* skip the util_est update.
int schedtune_prefer_idle(struct task_struct *tsk);
int schedtune_prefer_perf(struct task_struct *tsk);
+int schedtune_util_est_en(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);
#define schedtune_prefer_idle(tsk) 0
#define schedtune_prefer_perf(tsk) 0
+#define schedtune_util_est_en(tsk) 0
#define schedtune_enqueue_task(task, cpu) do { } while (0)
#define schedtune_dequeue_task(task, cpu) do { } while (0)
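
The kernel/sched/tune.c side of schedtune_util_est_en() is not shown in these hunks. A minimal sketch, assuming struct schedtune gains a per-group util_est_en field (plus the matching cgroup read/write attribute handlers, not shown) and following the same pattern as schedtune_prefer_idle() with the existing task_schedtune()/schedtune_initialized helpers, could look like:

int schedtune_util_est_en(struct task_struct *p)
{
	struct schedtune *st;
	int util_est_en;

	/* Before schedtune is initialized, behave as if the knob is off */
	if (unlikely(!schedtune_initialized))
		return 0;

	/* Read the task's schedtune group under RCU, as prefer_idle does */
	rcu_read_lock();
	st = task_schedtune(p);
	util_est_en = st->util_est_en;	/* assumed new per-group field */
	rcu_read_unlock();

	return util_est_en;
}

Whether the per-group field defaults to 0 or 1 decides if util_est stays active for tasks in the root group; that choice is not visible in these hunks.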