TP_printk("cpu=%d", __entry->cpu)
);
+
+#ifdef CONFIG_SMP
+#ifdef CREATE_TRACE_POINTS
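+/*
+ * The helpers below are only referenced from the TP_STRUCT__entry() and
+ * TP_fast_assign() bodies, which are only expanded when
+ * CREATE_TRACE_POINTS is defined. The guard ensures they are compiled
+ * exactly once, in the translation unit that instantiates the
+ * tracepoints, and avoids unused-function warnings everywhere else.
+ */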
+static inline
+int __trace_sched_cpu(struct cfs_rq *cfs_rq)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq = cfs_rq->rq;
+#else
+ struct rq *rq = container_of(cfs_rq, struct rq, cfs);
+#endif
+ return cpu_of(rq);
+}
+
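+/*
+ * Copy the cgroup path of cfs_rq's task group into @path (when non-NULL)
+ * and return its length including the terminating NUL. A first call with
+ * @path == NULL and @len == 0 sizes the __dynamic_array() entry below.
+ */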
+static inline
+int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ int l = path ? len : 0;
+
+ if (task_group_is_autogroup(cfs_rq->tg))
+ return autogroup_path(cfs_rq->tg, path, l) + 1;
+
+ return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
+#else
+ if (path)
+ strcpy(path, "(null)");
+
+ /* Account for the NUL terminator written by strcpy() above. */
+ return strlen("(null)") + 1;
+#endif
+}
+
+#endif /* CREATE_TRACE_POINTS */
+
+/*
+ * Tracepoint for cfs_rq load tracking:
+ */
+TRACE_EVENT(sched_load_cfs_rq,
+
+ TP_PROTO(struct cfs_rq *cfs_rq),
+
+ TP_ARGS(cfs_rq),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __dynamic_array(char, path,
+ __trace_sched_path(cfs_rq, NULL, 0) )
+ __field( unsigned long, load )
+ __field( unsigned long, util )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = __trace_sched_cpu(cfs_rq);
+ __trace_sched_path(cfs_rq, __get_dynamic_array(path),
+ __get_dynamic_array_len(path));
+ __entry->load = cfs_rq->runnable_load_avg;
+ __entry->util = cfs_rq->avg.util_avg;
+ ),
+
+ TP_printk("cpu=%d path=%s load=%lu util=%lu", __entry->cpu,
+ __get_str(path), __entry->load, __entry->util)
+);
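+/*
+ * Rendered trace line, with purely illustrative values:
+ *
+ *   sched_load_cfs_rq: cpu=1 path=/tg1/tg11 load=356 util=215
+ */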
+#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
}
sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib);
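+ /*
+ * cfs_rq may be NULL here, e.g. when a task sched_entity's
+ * sched_avg is being updated; only trace genuine cfs_rq updates.
+ */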
+ if (cfs_rq)
+ trace_sched_load_cfs_rq(cfs_rq);
+
return 1;
}
update_tg_cfs_util(cfs_rq, se);
update_tg_cfs_load(cfs_rq, se);
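+ /* Child load/util were just propagated into this group cfs_rq. */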
+ trace_sched_load_cfs_rq(cfs_rq);
+
return 1;
}
set_tg_cfs_propagate(cfs_rq);
cfs_rq_util_change(cfs_rq);
+
+ trace_sched_load_cfs_rq(cfs_rq);
}
/**
set_tg_cfs_propagate(cfs_rq);
cfs_rq_util_change(cfs_rq);
+
+ trace_sched_load_cfs_rq(cfs_rq);
}
/* Add the load generated by se into cfs_rq's load average */