*boost_migration = 1;
return p;
}
- if (ontime_load_avg(p) >= get_upper_boundary(task_cpu(p))) {
- heaviest_task = p;
- max_util_avg = ontime_load_avg(p);
- *boost_migration = 0;
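+ /* Only tasks whose schedtune group enables ontime are eligible as heaviest */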
+ if (schedtune_ontime_en(p)) {
+ if (ontime_load_avg(p) >= get_upper_boundary(task_cpu(p))) {
+ heaviest_task = p;
+ max_util_avg = ontime_load_avg(p);
+ *boost_migration = 0;
+ }
}
se = __pick_first_entity(se->cfs_rq);
break;
}
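+ /* Skip tasks whose schedtune group disables ontime */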
+ if (!schedtune_ontime_en(p))
+ goto next_entity;
+
if (ontime_load_avg(p) < get_upper_boundary(task_cpu(p)))
goto next_entity;
struct cpumask fit_cpus;
int dst_cpu, src_cpu = task_cpu(p);
+ /* If ontime is disabled for this task, do not ontime wakeup */
+ if (!schedtune_ontime_en(p))
+ return -1;
+
/* When wakeup task is on ontime migrating, do not ontime wakeup */
if (ontime_of(p)->migrating == 1)
return -1;
{
int src_cpu = task_cpu(p);
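+ /* Ontime imposes no migration restriction on tasks whose group disables it */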
+ if (!schedtune_ontime_en(p))
+ return true;
+
if (ontime_of(p)->migrating == 1) {
trace_ems_ontime_check_migrate(p, dst_cpu, false, "on migrating");
return false;
/* Hint to group tasks by process */
int band;
+
+ /* Enable SchedTune ontime migration */
+ int ontime_en;
};
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
return util_est_en;
}
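+/*
+ * Return the ontime_en flag of the schedtune group @p belongs to.
+ * A zero return excludes @p from ontime migration handling.
+ */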
+int schedtune_ontime_en(struct task_struct *p)
+{
+ struct schedtune *st;
+ int ontime_en;
+
+ if (unlikely(!schedtune_initialized))
+ return 0;
+
+ /* Get ontime value */
+ rcu_read_lock();
+ st = task_schedtune(p);
+ ontime_en = st->ontime_en;
+ rcu_read_unlock();
+
+ return ontime_en;
+}
+
int schedtune_prefer_idle(struct task_struct *p)
{
struct schedtune *st;
return 0;
}
+static u64
+ontime_en_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->ontime_en;
+}
+
+static int
+ontime_en_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 ontime_en)
+{
+ struct schedtune *st = css_st(css);
+
+ /* Normalize to 0/1: ontime_en is an int, the written value a u64 */
+ st->ontime_en = !!ontime_en;
+
+ return 0;
+}
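+
+/*
+ * The flag is exposed as schedtune.ontime_en in each schedtune cgroup.
+ * Example, assuming the usual Android stune mount point (path may vary):
+ *
+ *   echo 1 > /dev/stune/top-app/schedtune.ontime_en
+ */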
+
static u64
band_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
.read_u64 = util_est_en_read,
.write_u64 = util_est_en_write,
},
+ {
+ .name = "ontime_en",
+ .read_u64 = ontime_en_read,
+ .write_u64 = ontime_en_write,
+ },
{ } /* terminate */
};
int schedtune_prefer_idle(struct task_struct *tsk);
int schedtune_prefer_perf(struct task_struct *tsk);
int schedtune_util_est_en(struct task_struct *tsk);
+int schedtune_ontime_en(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);
#define schedtune_prefer_idle(tsk) 0
#define schedtune_prefer_perf(tsk) 0
#define schedtune_util_est_en(tsk) 0
+#define schedtune_ontime_en(tsk) 0
#define schedtune_enqueue_task(task, cpu) do { } while (0)
#define schedtune_dequeue_task(task, cpu) do { } while (0)