[COMMON] sched/rt: add a tracepoint for the FRT algorithm
author	Johnlay Park <jonglae.park@samsung.com>
Fri, 6 Apr 2018 07:10:14 +0000 (16:10 +0900)
committer	Chungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:35:39 +0000 (17:35 +0900)
Add a tracepoint that logs each CPU-selection decision made by the
Fluid RT (FRT) scheduler, to help analyze and optimize system
performance.
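
The new sched_fluid_stat event (which reuses and replaces the old
sched_load_avg_task event definition) records the task name and pid,
its load_avg/util_avg, the previous and newly assigned CPU, and a
label naming the decision path that picked the CPU: CACHE-HOT,
IDLE-FIRST, FAIR-RECESS, RT-RECESS, or VICTIM-FAIR/VICTIM-RT from
the victim search.

A trace line follows the TP_printk format below (field values are
illustrative placeholders, not captured output):

  frt: comm=<comm> pid=<pid> assigned to #<bestcpu> from #<prevcpu>
  load_avg=<load_avg> util_avg=<util_avg> by <reason>.

The event can be enabled like any other sched tracepoint, assuming
tracefs is mounted at /sys/kernel/debug/tracing:

  echo 1 > /sys/kernel/debug/tracing/events/sched/sched_fluid_stat/enable
  cat /sys/kernel/debug/tracing/trace_pipe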

Change-Id: I70c7a5f5c06b84e40689030e2186faee14a75c26
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
include/trace/events/sched.h
kernel/sched/rt.c

index e668c0e2bc87071eea38d7b053cd5fdd8c70cf81..ac264bb331e99f3b2e3d7da65d82f20b48fb2173 100644 (file)
@@ -638,47 +638,43 @@ struct cfs_rq *__trace_sched_group_cfs_rq(struct sched_entity *se)
 #endif /* CREATE_TRACE_POINTS */
 
 /*
- * Tracepoint for accounting sched averages for fair tasks.
+ * Tracepoint for logging Fluid RT (FRT) scheduler activity.
  */
-TRACE_EVENT(sched_load_avg_task,
+TRACE_EVENT(sched_fluid_stat,
 
-       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, int best, char *str),
 
-       TP_ARGS(tsk, avg),
+       TP_ARGS(tsk, avg, best, str),
 
        TP_STRUCT__entry(
-               __array( char,  comm,   TASK_COMM_LEN           )
+               __array( char,  selectby,       TASK_COMM_LEN   )
+               __array( char,  targettsk,      TASK_COMM_LEN   )
                __field( pid_t, pid                             )
-               __field( int,   cpu                             )
+               __field( int,   bestcpu                         )
+               __field( int,   prevcpu                         )
                __field( unsigned long, load_avg                )
                __field( unsigned long, util_avg                )
-               __field( u64,           load_sum                )
-               __field( u32,           util_sum                )
-               __field( u32,           period_contrib          )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               strncpy(__entry->selectby, str, TASK_COMM_LEN);
+               memcpy(__entry->targettsk, tsk->comm, TASK_COMM_LEN);
                __entry->pid                    = tsk->pid;
-               __entry->cpu                    = task_cpu(tsk);
+               __entry->bestcpu                = best;
+               __entry->prevcpu                = task_cpu(tsk);
                __entry->load_avg               = avg->load_avg;
                __entry->util_avg               = avg->util_avg;
-               __entry->load_sum               = avg->load_sum;
-               __entry->util_sum               = avg->util_sum;
-               __entry->period_contrib         = avg->period_contrib;
        ),
-       TP_printk("fair: comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
-                       "load_sum=%llu util_sum=%u period_contrib=%u",
-                 __entry->comm,
+       TP_printk("frt: comm=%s pid=%d assigned to #%d from #%d load_avg=%lu util_avg=%lu "
+                       "by %s.",
+                 __entry->targettsk,
                  __entry->pid,
-                 __entry->cpu,
+                 __entry->bestcpu,
+                 __entry->prevcpu,
                  __entry->load_avg,
                  __entry->util_avg,
-                 (u64)__entry->load_sum,
-                 (u32)__entry->util_sum,
-                 (u32)__entry->period_contrib)
+                 __entry->selectby)
 );
-
 /*
  * Tracepoint for accounting sched averages for tasks.
  */
index f0b2572a5ae20c04a7cfc4cf89b82269416e048c..7fd21086e7250cdabca0f7c72dd75d184a335634 100644 (file)
@@ -2278,6 +2278,11 @@ static int find_victim_rt_rq(struct task_struct *task, struct sched_group *sg, i
                set_victim_flag(cpu_rq(*best_cpu)->curr);
        }
 
+       if (victim_rt)
+               trace_sched_fluid_stat(task, &task->se.avg, *best_cpu, "VICTIM-FAIR");
+       else
+               trace_sched_fluid_stat(task, &task->se.avg, *best_cpu, "VICTIM-RT");
+
        return *best_cpu;
 
 }
@@ -2319,6 +2324,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
         */
        if (wake_flags || affordable_cpu(prefer_cpu, task_util(task))) {
                best_cpu = prefer_cpu;
+               trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "CACHE-HOT");
                goto out;
        }
 
@@ -2336,6 +2342,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
 
                if (idle_cpu(cpu)) {
                        best_cpu = cpu;
+                       trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "IDLE-FIRST");
                        goto out;
                }
        }
@@ -2380,6 +2387,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
                if (min_cpu >= 0 &&
                        ((capacity_of(min_cpu) >= min_load) || (min_cpu == prefer_cpu))) {
                        best_cpu = min_cpu;
+                       trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "FAIR-RECESS");
                        goto unlock;
                }
 
@@ -2387,6 +2395,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
                if (min_rt_cpu >= 0 &&
                        ((capacity_of(min_rt_cpu) >= min_rt_load) || (min_rt_cpu == prefer_cpu))) {
                        best_cpu = min_rt_cpu;
+                       trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "RT-RECESS");
                        goto unlock;
                }