#endif
}
+#ifdef CONFIG_SMP
+static void sched_rt_update_capacity_req(struct rq *rq)
+{
+	u64 total, used, age_stamp, avg;
+	s64 delta;
+
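+	/* Nothing to do unless scheduler-driven frequency selection is in use. */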
+	if (!sched_freq())
+		return;
+
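+	/*
+	 * Decay rq->rt_avg first so that a long stretch without rt activity
+	 * does not leave a stale, inflated average behind.
+	 */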
+	sched_avg_update(rq);
+	/*
+	 * Since we're reading these variables without serialization, make
+	 * sure we read them once before doing sanity checks on them.
+	 */
+	age_stamp = READ_ONCE(rq->age_stamp);
+	avg = READ_ONCE(rq->rt_avg);
+	delta = rq_clock(rq) - age_stamp;
+
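+	/* The unserialized reads can make the clock appear behind age_stamp. */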
+	if (unlikely(delta < 0))
+		delta = 0;
+
+	total = sched_avg_period() + delta;
+
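+	/*
+	 * rt_avg accumulates rt runtime scaled by capacity, so dividing by
+	 * the elapsed window gives the rt utilization of this CPU in
+	 * SCHED_CAPACITY_SCALE units.
+	 */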
+	used = div_u64(avg, total);
+	if (unlikely(used > SCHED_CAPACITY_SCALE))
+		used = SCHED_CAPACITY_SCALE;
+
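+	/* Publish the rt capacity request for this CPU. */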
+	set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used));
+}
+#else
+static inline void sched_rt_update_capacity_req(struct rq *rq)
+{ }
+
+#endif
+
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	if (prev->sched_class == &rt_sched_class)
		update_curr_rt(rq);
-	if (!rt_rq->rt_queued)
+	if (!rt_rq->rt_queued) {
+		/*
+		 * The next task to be picked on this rq will have a lower
+		 * priority than rt tasks, so we can spend some time updating
+		 * the capacity used by rt tasks based on the last activity.
+		 * This value will then be used as an estimate of the next
+		 * activity.
+		 */
+		sched_rt_update_capacity_req(rq);
		return NULL;
+	}
	put_prev_task(rq, prev);
	update_curr_rt(rq);
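+	/*
+	 * Keep the capacity request fresh on every tick while rt tasks are
+	 * running; pick_next_task_rt() only updates it once the rq has run
+	 * out of rt tasks.
+	 */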
+	if (rq->rt.rt_nr_running)
+		sched_rt_update_capacity_req(rq);
+
	watchdog(rq, p);
	/*