return cpu;
}
+#ifdef CONFIG_RT_GROUP_SCHED
+/*
+ * Called within set_task_rq() right before setting a task's cpu. The
+ * caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+void set_task_rq_rt(struct sched_rt_entity *rt_se,
+		    struct rt_rq *prev, struct rt_rq *next)
+{
+	u64 p_last_update_time;
+	u64 n_last_update_time;
+
+	if (!sched_feat(ATTACH_AGE_LOAD))
+		return;
+
+	/*
+	 * We are supposed to update the task to "current" time, so that it is
+	 * up to date and ready to go to the new CPU/rt_rq. But it is hard to
+	 * tell what the current time is, so simply throw away the out-of-date
+	 * time. This leaves the wakee task less decayed than it strictly
+	 * should be; giving the wakee a bit more load is not a bad trade-off.
+	 */
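+
+	/* Nothing to age without a previous rt_rq or a prior update. */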
+	if (!(rt_se->avg.last_update_time && prev))
+		return;
+
+#ifndef CONFIG_64BIT
+	{
+		u64 p_last_update_time_copy;
+		u64 n_last_update_time_copy;
+
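+		/*
+		 * The 64-bit timestamps cannot be read atomically here:
+		 * read the _copy first, then the value, and retry until
+		 * the two agree so that a torn read is never used.
+		 */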
+		do {
+			p_last_update_time_copy = prev->load_last_update_time_copy;
+			n_last_update_time_copy = next->load_last_update_time_copy;
+
+			smp_rmb();
+
+			p_last_update_time = prev->avg.last_update_time;
+			n_last_update_time = next->avg.last_update_time;
+
+		} while (p_last_update_time != p_last_update_time_copy ||
+			 n_last_update_time != n_last_update_time_copy);
+	}
+#else
+	p_last_update_time = prev->avg.last_update_time;
+	n_last_update_time = next->avg.last_update_time;
+#endif
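+	/*
+	 * Decay rt_se's load up to prev's last update time, then stamp it
+	 * with next's time so future updates start from the new rt_rq's
+	 * clock.
+	 */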
+	__update_load_avg(p_last_update_time, cpu_of(rq_of_rt_rq(prev)),
+			  &rt_se->avg, 0, 0, NULL);
+
+	rt_se->avg.last_update_time = n_last_update_time;
+}
+#endif /* CONFIG_RT_GROUP_SCHED */
+
#ifndef CONFIG_64BIT
static inline u64 rt_rq_last_update_time(struct rt_rq *rt_rq)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+#ifdef CONFIG_RT_GROUP_SCHED
+#ifdef CONFIG_SMP
+extern void set_task_rq_rt(struct sched_rt_entity *rt_se,
+			   struct rt_rq *prev, struct rt_rq *next);
+#else /* !CONFIG_SMP */
+static inline void set_task_rq_rt(struct sched_rt_entity *rt_se,
+				  struct rt_rq *prev, struct rt_rq *next) { }
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_RT_GROUP_SCHED */
+
#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next);
struct rq *rq;
struct task_group *tg;
+#ifndef CONFIG_64BIT
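+	/* Copy of avg.last_update_time for tear-free reads on 32-bit. */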
+	u64 load_last_update_time_copy;
+#endif
#endif
};