se->vruntime += cfs_rq->min_vruntime;
}
+#ifdef CONFIG_SMP
+/*
+ * copy_sched_avg - copy PELT averages between sched_avg structures,
+ * scaling the load/util contribution by @ratio percent.
+ *
+ * @from:  source averages (e.g. &p->rt.avg or &p->se.avg)
+ * @to:    destination averages
+ * @ratio: percentage (0-100) of @from's signal to carry over
+ *
+ * Used when a task switches scheduling class so the new class starts
+ * from a scaled copy of the old class's load tracking rather than zero.
+ */
+void copy_sched_avg(struct sched_avg *from, struct sched_avg *to, unsigned int ratio)
+{
+	/*
+	 * @ratio is unsigned, so a "< 0" test can never fire; clamp at the
+	 * upper bound instead so a bogus caller cannot inflate the signal.
+	 */
+	if (ratio > 100)
+		ratio = 100;
+
+	to->last_update_time = from->last_update_time;
+	to->util_avg = (from->util_avg * ratio) / 100;
+	to->util_sum = (from->util_sum * ratio) / 100;
+	to->load_avg = (from->load_avg * ratio) / 100;
+	to->load_sum = (from->load_sum * ratio) / 100;
+}
+#else
+/* !CONFIG_SMP: no per-entity load tracking, nothing to copy. */
+void copy_sched_avg(struct sched_avg *from, struct sched_avg *to, unsigned int ratio) { }
+#endif
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
detach_task_cfs_rq(p);
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
+ /*
+ * Need to scale the applying ratio while migrating.
+ * - Copy rt sched avg into fair sched avg
+ */
+ copy_sched_avg(&p->rt.avg, &p->se.avg, 100);
attach_task_cfs_rq(p);
if (task_on_rq_queued(p)) {
}
#endif /* CONFIG_SMP */
+/*
+ * Defined in fair.c: copies PELT averages between sched_avg structures,
+ * scaled by @ratio percent, when a task changes scheduling class.
+ * NOTE(review): a cross-file prototype conventionally belongs in a shared
+ * header (kernel/sched/sched.h) rather than being re-declared here.
+ */
+extern void
+copy_sched_avg(struct sched_avg *from, struct sched_avg *to, unsigned int ratio);
+
/*
* When switching a task to RT, we may overload the runqueue
* with RT tasks. In this case we try to push them off to
*/
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
+ /* Copy fair sched avg into rt sched avg */
+ copy_sched_avg(&p->se.avg, &p->rt.avg, 100);
/*
* If we are already running, then there's nothing
* that needs to be done. But if we are not running