struct util_est util_est;
};
-#define ontime_of(p) (&p->se.ontime)
-
struct ontime_avg {
u64 ontime_migration_time;
u64 load_sum;
#include "ems.h"
#include "../sched.h"
+#include "../tune.h"
unsigned long task_util(struct task_struct *p)
{
wake_util = cpu_util_wake(i, p);
new_util = wake_util + task_util_est(p);
+ new_util = max(new_util, boosted_task_util(p));
/* skip over-capacity cpu */
if (new_util > capacity_orig)
extern int cpu_util_wake(int cpu, struct task_struct *p);
extern unsigned long task_util_est(struct task_struct *p);
extern inline unsigned int get_cpu_mips(unsigned int cpu);
+
+extern unsigned long boosted_task_util(struct task_struct *p);
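boosted_task_util() is only declared extern here; its definition is not part of this hunk. A minimal sketch of how it is commonly defined on SchedTune-enabled kernels (the exact body below is an assumption, not taken from this patch):

unsigned long boosted_task_util(struct task_struct *p)
{
	unsigned long util = task_util_est(p);
	int boost = schedtune_task_boost(p);
	/* Add the Signal Proportional Compensation margin for a non-zero boost. */
	long margin = boost ? schedtune_margin(util, boost) : 0;

	return util + margin;
}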
wake_util = cpu_util_wake(cpu, p);
new_util = wake_util + task_util_est(p);
+ new_util = max(new_util, boosted_task_util(p));
/* checking prev cpu is meaningless */
if (eenv->prev_cpu == cpu)
#define MIN_CAPACITY_CPU 0
#define MAX_CAPACITY_CPU (NR_CPUS - 1)
-#define ontime_load_avg(p) (ontime_of(p)->avg.load_avg)
+#define ontime_of(p) (&p->se.ontime)
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
return container_of(sa, struct sched_entity, avg);
}
+extern long schedtune_margin(unsigned long signal, long boost);
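+/*
+ * Ontime load of @p, adjusted by its schedtune boost margin.
+ */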
+static inline unsigned long ontime_load_avg(struct task_struct *p)
+{
+ int boost = schedtune_task_boost(p);
+ unsigned long load_avg = ontime_of(p)->avg.load_avg;
+
+ if (boost == 0)
+ return load_avg;
+
+ return load_avg + schedtune_margin(load_avg, boost);
+}
+
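For reference, schedtune_margin() (exported at the bottom of this patch) implements the upstream Signal Proportional Compensation margin, so for a positive boost the boosted ontime load works out roughly as follows (illustrative numbers, assuming SCHED_CAPACITY_SCALE = 1024):

	margin          = (SCHED_CAPACITY_SCALE - load_avg) * boost / 100
	ontime_load_avg = load_avg + margin

	e.g. load_avg = 512, boost = 50: margin = (1024 - 512) * 50 / 100 = 256,
	so ontime_load_avg = 512 + 256 = 768.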
struct ontime_cond *get_current_cond(int cpu)
{
struct ontime_cond *curr;
wake_util = cpu_util_wake(i, p);
new_util = wake_util + task_util_est(p);
+ new_util = max(new_util, boosted_task_util(p));
trace_ems_prefer_idle(p, task_cpu(p), i, capacity_orig, task_util_est(p),
new_util, idle_cpu(i));
#ifdef CONFIG_SCHED_TUNE
struct reciprocal_value schedtune_spc_rdiv;
-static long
+long
schedtune_margin(unsigned long signal, long boost)
{
long long margin = 0;