);
#endif /* CONFIG_FAIR_GROUP_SCHED */
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+ TP_ARGS(tsk, avg),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned int, util_avg )
+ __field( unsigned int, est_enqueued )
+ __field( unsigned int, est_ewma )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->util_avg = avg->util_avg;
+ __entry->est_enqueued = avg->util_est.enqueued;
+ __entry->est_ewma = avg->util_est.ewma;
+ ),
+
+ TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
+ __entry->comm,
+ __entry->pid,
+ __entry->cpu,
+ __entry->util_avg,
+ __entry->est_ewma,
+ __entry->est_enqueued)
+);
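In-kernel consumers can hook the new event directly as well. A minimal module-side sketch, assuming the tracepoint symbol is exported to modules; the probe signature mirrors TP_PROTO with the tracepoint's private-data pointer prepended, and all names here are illustrative only:

#include <linux/module.h>
#include <trace/events/sched.h>

static void probe_util_est_task(void *data, struct task_struct *tsk,
				struct sched_avg *avg)
{
	/* Runs on every trace_sched_util_est_task() call site. */
	pr_debug("util_est: comm=%s enqueued=%u ewma=%u\n",
		 tsk->comm, avg->util_est.enqueued, avg->util_est.ewma);
}

static int __init util_est_probe_init(void)
{
	return register_trace_sched_util_est_task(probe_util_est_task, NULL);
}

static void __exit util_est_probe_exit(void)
{
	unregister_trace_sched_util_est_task(probe_util_est_task, NULL);
	tracepoint_synchronize_unregister();
}

module_init(util_est_probe_init);
module_exit(util_est_probe_exit);
MODULE_LICENSE("GPL");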
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+ TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+ TP_ARGS(cpu, cfs_rq),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned int, util_avg )
+ __field( unsigned int, util_est_enqueued )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->util_avg = cfs_rq->avg.util_avg;
+ __entry->util_est_enqueued = cfs_rq->avg.util_est.enqueued;
+ ),
+
+ TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
+ __entry->cpu,
+ __entry->util_avg,
+ __entry->util_est_enqueued)
+);
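From userspace, both events appear under the sched group in tracefs once this patch is applied. A rough sketch for enabling them; the mount point is an assumption (newer kernels also expose /sys/kernel/tracing):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Assumed tracefs mount point; adjust for your system. */
#define TRACEFS "/sys/kernel/debug/tracing"

static void enable_event(const char *name)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), TRACEFS "/events/sched/%s/enable", name);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
}

int main(void)
{
	/* Names match the TRACE_EVENT() identifiers above. */
	enable_event("sched_util_est_task");
	enable_event("sched_util_est_cpu");
	return 0;
}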
+
/*
* Tracepoint for accounting CPU boosted utilization
*/
enqueued = cfs_rq->avg.util_est.enqueued;
enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+ /* Trace task and CPU estimated utilization */
+ trace_sched_util_est_task(p, &p->se.avg);
+ trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}
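For context on the call site above: cfs_rq->avg.util_est.enqueued is maintained as the running sum of the estimates of the currently enqueued tasks, so the CPU event fires right after each task's contribution is added here (and after it is removed in the dequeue path below). A standalone toy model of that bookkeeping, with made-up types and without the UTIL_AVG_UNCHANGED flag handling:

#include <stdio.h>

struct toy_task { unsigned int est; };
struct toy_rq   { unsigned int est_enqueued; };

/* Enqueue adds the task's estimate into the rq-level aggregate. */
static void toy_enqueue(struct toy_rq *rq, struct toy_task *p)
{
	rq->est_enqueued += p->est;
	printf("trace: cpu util_est_enqueued=%u\n", rq->est_enqueued);
}

/* Dequeue removes it again, clamped so the sum never underflows. */
static void toy_dequeue(struct toy_rq *rq, struct toy_task *p)
{
	if (p->est < rq->est_enqueued)
		rq->est_enqueued -= p->est;
	else
		rq->est_enqueued = 0;
	printf("trace: cpu util_est_enqueued=%u\n", rq->est_enqueued);
}

int main(void)
{
	struct toy_rq rq = { 0 };
	struct toy_task a = { .est = 120 }, b = { .est = 300 };

	toy_enqueue(&rq, &a);
	toy_enqueue(&rq, &b);
	toy_dequeue(&rq, &a);
	return 0;
}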
/*
}
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
+ /* Trace CPU's estimated utilization */
+ trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
/*
* Skip update of task's estimated utilization when the task has not
* yet completed an activation, e.g. being migrated.
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
WRITE_ONCE(p->se.avg.util_est, ue);
+
+ /* Trace task's estimated utilization */
+ trace_sched_util_est_task(p, &p->se.avg);
}
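The traced ewma follows ewma(t) = ewma(t-1) + w * (enqueued - ewma(t-1)) with w = 1/4 (UTIL_EST_WEIGHT_SHIFT is 2 upstream); the right shift above is the tail of that computation, and in the upstream dequeue path the ewma is shifted left by the same amount before the diff is added. A standalone sketch of the same arithmetic:

#include <stdio.h>

/* Matches the upstream value, i.e. a weight of 1/4. */
#define UTIL_EST_WEIGHT_SHIFT 2

/* ewma(t) = ewma(t-1) + w * (enqueued - ewma(t-1)), done with shifts
 * in the same style as the dequeue-time update above. */
static unsigned int ewma_update(unsigned int ewma, unsigned int enqueued)
{
	int last_ewma_diff = (int)enqueued - (int)ewma;

	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += last_ewma_diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;
	return ewma;
}

int main(void)
{
	unsigned int ewma = 100;

	/* A jump to 400 moves the estimate a quarter of the way per step. */
	for (int i = 0; i < 4; i++) {
		ewma = ewma_update(ewma, 400);
		printf("step %d: ewma=%u\n", i, ewma);
	}
	return 0;
}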
#else /* CONFIG_SMP */