ANDROID: sched/events: Introduce cfs_rq load tracking trace event
author     Dietmar Eggemann <dietmar.eggemann@arm.com>
           Fri, 17 Mar 2017 20:27:06 +0000
committer  Chris Redpath <chris.redpath@arm.com>
           Tue, 19 Dec 2017 16:50:55 +0000
The trace event keys load and util (utilization) are mapped to:

 (1) load : cfs_rq->runnable_load_avg

 (2) util : cfs_rq->avg.util_avg

To let this trace event work for configurations w/ and w/o group
scheduling support for cfs (CONFIG_FAIR_GROUP_SCHED), the following
special handling is necessary for key=value pairs whose backing
data does not exist:

 path = "(null)" : In case of !CONFIG_FAIR_GROUP_SCHED.

The following list shows examples of the key=value pairs in different
configurations for:

 (1) a root task_group:

     cpu=4 path=/ load=6 util=331

 (2) a task_group:

     cpu=1 path=/tg1/tg11/tg111 load=538 util=522

 (3) an autogroup:

     cpu=3 path=/autogroup-18 load=997 util=517

 (4) w/o CONFIG_FAIR_GROUP_SCHED:

     cpu=0 path=(null) load=314 util=289

The trace event is only defined for CONFIG_SMP.

The helper function __trace_sched_path() can be used to get the length
parameter of the dynamic array (path == NULL) and to copy the path into
it (path != NULL).
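
The length-query/copy pattern of __trace_sched_path() can be
illustrated with a small userspace analogue (a hedged sketch; the
function demo_sched_path() and the example path are made up for
illustration and are not part of this patch):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /*
   * Userspace analogue of __trace_sched_path(): called with
   * path == NULL it only reports the buffer length needed
   * (including the terminating NUL); called with path != NULL
   * it copies the path into the caller-provided buffer.
   */
  static int demo_sched_path(const char *src, char *path, int len)
  {
          if (path)
                  snprintf(path, len, "%s", src);

          return strlen(src) + 1;
  }

  int main(void)
  {
          /* Pass 1: query the required length (path == NULL). */
          int len = demo_sched_path("/tg1/tg11/tg111", NULL, 0);
          char *path = malloc(len);

          /* Pass 2: copy the path into the sized buffer. */
          demo_sched_path("/tg1/tg11/tg111", path, len);
          printf("path=%s len=%d\n", path, len);
          free(path);

          return 0;
  }

This mirrors how the TRACE_EVENT below sizes its dynamic array via
__trace_sched_path(cfs_rq, NULL, 0) and then fills it in
TP_fast_assign().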

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Change-Id: Iae08075d889dd772c8d2e1a15dc2ca6589e5640e
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index da10aa21bebc847bd1c71d1ffed177494bcb774e..96ea879b2014fec2128f47149eeb881def42170c 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -570,6 +570,69 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
 
        TP_printk("cpu=%d", __entry->cpu)
 );
+
+#ifdef CONFIG_SMP
+#ifdef CREATE_TRACE_POINTS
+static inline
+int __trace_sched_cpu(struct cfs_rq *cfs_rq)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       struct rq *rq = cfs_rq->rq;
+#else
+       struct rq *rq = container_of(cfs_rq, struct rq, cfs);
+#endif
+       return cpu_of(rq);
+}
+
+static inline
+int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       int l = path ? len : 0;
+
+       if (task_group_is_autogroup(cfs_rq->tg))
+               return autogroup_path(cfs_rq->tg, path, l) + 1;
+       else
+               return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
+#else
+       if (path)
+               strcpy(path, "(null)");
+
+       return strlen("(null)") + 1;
+#endif
+}
+
+#endif /* CREATE_TRACE_POINTS */
+
+/*
+ * Tracepoint for cfs_rq load tracking:
+ */
+TRACE_EVENT(sched_load_cfs_rq,
+
+       TP_PROTO(struct cfs_rq *cfs_rq),
+
+       TP_ARGS(cfs_rq),
+
+       TP_STRUCT__entry(
+               __field(        int,            cpu                     )
+               __dynamic_array(char,           path,
+                               __trace_sched_path(cfs_rq, NULL, 0)     )
+               __field(        unsigned long,  load                    )
+               __field(        unsigned long,  util                    )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu    = __trace_sched_cpu(cfs_rq);
+               __trace_sched_path(cfs_rq, __get_dynamic_array(path),
+                                  __get_dynamic_array_len(path));
+               __entry->load   = cfs_rq->runnable_load_avg;
+               __entry->util   = cfs_rq->avg.util_avg;
+       ),
+
+       TP_printk("cpu=%d path=%s load=%lu util=%lu", __entry->cpu,
+                 __get_str(path), __entry->load, __entry->util)
+);
+#endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5e7b66d9341720543111a91b03d4f9d64578fd92..733e7ee870adb39696a491c3db3c8a9a326103df 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3023,6 +3023,9 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
        }
        sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib);
 
+       if (cfs_rq)
+               trace_sched_load_cfs_rq(cfs_rq);
+
        return 1;
 }
 
@@ -3266,6 +3269,8 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
        update_tg_cfs_util(cfs_rq, se);
        update_tg_cfs_load(cfs_rq, se);
 
+       trace_sched_load_cfs_rq(cfs_rq);
+
        return 1;
 }
 
@@ -3427,6 +3432,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
        set_tg_cfs_propagate(cfs_rq);
 
        cfs_rq_util_change(cfs_rq);
+
+       trace_sched_load_cfs_rq(cfs_rq);
 }
 
 /**
@@ -3447,6 +3454,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
        set_tg_cfs_propagate(cfs_rq);
 
        cfs_rq_util_change(cfs_rq);
+
+       trace_sched_load_cfs_rq(cfs_rq);
 }
 
 /* Add the load generated by se into cfs_rq's load average */
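
For completeness: once this patch is applied, a probe could be
attached to the new tracepoint from kernel code roughly as follows.
This is a minimal sketch, assuming the tracepoint is reachable by the
caller (e.g. built-in code, or a module if the tracepoint were
exported); the probe and module names are hypothetical:

  #include <linux/module.h>
  #include <trace/events/sched.h>

  /*
   * Tracepoint probes take a void *data argument first, followed by
   * the TP_PROTO() arguments of the event.
   */
  static void probe_load_cfs_rq(void *data, struct cfs_rq *cfs_rq)
  {
          /* e.g. feed the sample into a custom statistic */
  }

  static int __init load_probe_init(void)
  {
          return register_trace_sched_load_cfs_rq(probe_load_cfs_rq, NULL);
  }

  static void __exit load_probe_exit(void)
  {
          unregister_trace_sched_load_cfs_rq(probe_load_cfs_rq, NULL);
          tracepoint_synchronize_unregister();
  }

  module_init(load_probe_init);
  module_exit(load_probe_exit);
  MODULE_LICENSE("GPL");

Alternatively, the event can simply be enabled through the regular
ftrace interface (events/sched/sched_load_cfs_rq/enable in tracefs)
on a CONFIG_SMP kernel.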