* It is set to NULL otherwise (i.e., when none are currently running).
*/
struct sched_entity *curr;
+
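+ /*
+ * Number of times an entity's vruntime was observed more than
+ * 3*sysctl_sched_latency away from min_vruntime; incremented by
+ * check_spread() under CONFIG_SCHED_DEBUG.
+ */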
+ unsigned long nr_spread_over;
+
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
SEQ_printf(m, " .%-30s: %ld\n", "bkl_cnt",
rq->bkl_cnt);
#endif
+ SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+ cfs_rq->nr_spread_over);
}
static void print_cpu(struct seq_file *m, int cpu)
#endif
}
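+/*
+ * Debug check: count how often an entity's vruntime strays more than
+ * three scheduling latencies from the queue's min_vruntime, i.e. how
+ * far the entities have spread out over the timeline.
+ */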
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+ if (d < 0)
+ d = -d;
+
+ if (d > 3*sysctl_sched_latency)
+ schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
}
update_stats_enqueue(cfs_rq, se);
+ check_spread(cfs_rq, se);
if (se != cfs_rq->curr)
__enqueue_entity(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
update_stats_curr_end(cfs_rq, prev);
+ check_spread(cfs_rq, prev);
if (prev->on_rq) {
update_stats_wait_start(cfs_rq, prev);
/* Put 'current' back into the tree. */
}
update_stats_enqueue(cfs_rq, se);
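+ /* update the spread statistic for both the new entity and the current one */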
+ check_spread(cfs_rq, se);
+ check_spread(cfs_rq, curr);
__enqueue_entity(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
resched_task(rq->curr);