sched/events: Introduce util_est trace events

author    lakkyung.jung <lakkyung.jung@samsung.com>
          Mon, 16 Apr 2018 13:22:43 +0000 (22:22 +0900)
committer lakkyung.jung <lakkyung.jung@samsung.com>
          Mon, 23 Jul 2018 05:58:56 +0000 (14:58 +0900)

Add two trace events for the estimated utilization (util_est) signals:
sched_util_est_task reports a task's util_avg together with its
util_est.enqueued and util_est.ewma values, and sched_util_est_cpu reports
the root cfs_rq's util_avg and util_est.enqueued for a CPU. The events are
emitted from util_est_enqueue() and util_est_dequeue(), i.e. at every point
where the signals are updated.

Change-Id: I22c98bbaa7dda598d31a20b310afbf16d5fb8208
Signed-off-by: lakkyung.jung <lakkyung.jung@samsung.com>
include/trace/events/sched.h
kernel/sched/fair.c

index ac264bb331e99f3b2e3d7da65d82f20b48fb2173..1138faf98c705fe2d95845536f9a296914b8af6c 100644 (file)
@@ -902,6 +902,69 @@ TRACE_EVENT(sched_load_tg,
 );
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+       TP_ARGS(tsk, avg),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN           )
+               __field( pid_t,         pid                     )
+               __field( int,           cpu                     )
+               __field( unsigned int,  util_avg                )
+               __field( unsigned int,  est_enqueued            )
+               __field( unsigned int,  est_ewma                )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->util_avg               = avg->util_avg;
+               __entry->est_enqueued           = avg->util_est.enqueued;
+               __entry->est_ewma               = avg->util_est.ewma;
+       ),
+
+       TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
+                 __entry->comm,
+                 __entry->pid,
+                 __entry->cpu,
+                 __entry->util_avg,
+                 __entry->est_ewma,
+                 __entry->est_enqueued)
+);
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+       TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+       TP_ARGS(cpu, cfs_rq),
+
+       TP_STRUCT__entry(
+               __field( int,           cpu                     )
+               __field( unsigned int,  util_avg                )
+               __field( unsigned int,  util_est_enqueued       )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu                    = cpu;
+               __entry->util_avg               = cfs_rq->avg.util_avg;
+               __entry->util_est_enqueued      = cfs_rq->avg.util_est.enqueued;
+       ),
+
+       TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
+                 __entry->cpu,
+                 __entry->util_avg,
+                 __entry->util_est_enqueued)
+);
+
 /*
  * Tracepoint for accounting CPU boosted utilization
  */
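Usage note: both events are registered under the "sched" trace system (the
header being modified is include/trace/events/sched.h), so once the patch is
applied they can be enabled and read through tracefs. Below is a minimal
userspace sketch; the mount point /sys/kernel/debug/tracing is an assumption
about the target system, while the event paths derive directly from the
TRACE_EVENT names in this patch:

	/* Enable sched_util_est_task/sched_util_est_cpu and stream trace_pipe. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define TRACEFS "/sys/kernel/debug/tracing"

	/* Write a short string to a tracefs control file. */
	static int echo(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		echo(TRACEFS "/events/sched/sched_util_est_task/enable", "1");
		echo(TRACEFS "/events/sched/sched_util_est_cpu/enable", "1");

		fd = open(TRACEFS "/trace_pipe", O_RDONLY);
		if (fd < 0) {
			perror("trace_pipe");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}

Each update then produces a line in the TP_printk() format defined above,
e.g. (task names, values, and ftrace flag fields hypothetical):

	top-3313  [002] d..2  1234.567890: sched_util_est_task: comm=top pid=3313 cpu=2 util_avg=48 util_est_ewma=77 util_est_enqueued=84
	top-3313  [002] d..2  1234.567891: sched_util_est_cpu: cpu=2 util_avg=130 util_est_enqueued=161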
index 79e7c724e70e5763a2ecee38fd4c112a328a3833..2e5975a9eea0735a6c353c6deaf86525547790c1 100644 (file)
@@ -3671,6 +3671,10 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
        enqueued  = cfs_rq->avg.util_est.enqueued;
        enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+       /* Update plots for Task and CPU estimated utilization */
+       trace_sched_util_est_task(p, &p->se.avg);
+       trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
 }
 
 /*
@@ -3708,6 +3712,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        }
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
+       /* Update plots for CPU's estimated utilization */
+       trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
        /*
         * Skip update of task's estimated utilization when the task has not
         * yet completed an activation, e.g. being migrated.
@@ -3753,6 +3760,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        ue.ewma  += last_ewma_diff;
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
        WRITE_ONCE(p->se.avg.util_est, ue);
+
+       /* Update plots for Task's estimated utilization */
+       trace_sched_util_est_task(p, &p->se.avg);
 }
 
 #else /* CONFIG_SMP */
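For context on the values being traced: the dequeue path above implements a
fixed-point exponentially weighted moving average, ewma <- ewma +
(enqueued - ewma) / 2^UTIL_EST_WEIGHT_SHIFT, so with the mainline shift value
of 2 (an assumption here, since the definition is outside the hunks shown)
each new sample contributes 1/4 of the estimate. A standalone sketch of that
arithmetic:

	/* Model of the util_est EWMA update traced by sched_util_est_task. */
	#include <stdio.h>

	#define UTIL_EST_WEIGHT_SHIFT	2	/* assumption: mainline value */

	static unsigned int ewma_update(unsigned int ewma, unsigned int enqueued)
	{
		/* Mirrors fair.c: scale up, add the signed diff, scale down. */
		long last_ewma_diff = (long)enqueued - (long)ewma;
		long scaled = ((long)ewma << UTIL_EST_WEIGHT_SHIFT) + last_ewma_diff;

		return (unsigned int)(scaled >> UTIL_EST_WEIGHT_SHIFT);
	}

	int main(void)
	{
		unsigned int ewma = 0;

		/* Feed a constant utilization sample; the estimate converges to it. */
		for (int i = 0; i < 8; i++) {
			ewma = ewma_update(ewma, 400);
			printf("step %d: ewma=%u\n", i, ewma);
		}
		return 0;
	}

This is why util_est_ewma in the trace lags util_est_enqueued: the moving
average smooths out transient drops in a task's utilization.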