[COMMON] sched: ems: Rename EMS trace events from ehmp_* to ems_*
author Daeyeong Lee <daeyeong.lee@samsung.com>
Fri, 6 Apr 2018 06:47:37 +0000 (15:47 +0900)
committer Chungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:35:31 +0000 (17:35 +0900)
Change-Id: I8641050a38bccaa26bac3016e52c10a223e14353
Signed-off-by: Daeyeong Lee <daeyeong.lee@samsung.com>
include/trace/events/ems.h
kernel/sched/ems/lbt.c
kernel/sched/ems/ontime.c

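For context on the mechanics of this rename: the first argument of TRACE_EVENT() is the event name, and the tracepoint infrastructure generates a matching trace_<name>() call from it, so each header rename below must be mirrored at the call sites in lbt.c and ontime.c. A minimal sketch of that pattern follows; the event and field names (ems_example, cpu, util) are illustrative only and do not appear in this patch.

/* Hypothetical trace header sketch, e.g. include/trace/events/ems.h.
 * The TRACE_EVENT() name ("ems_example") determines both the tracefs
 * event name and the generated trace_ems_example() call-site symbol.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ems

#if !defined(_TRACE_EMS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EMS_H

#include <linux/tracepoint.h>

TRACE_EVENT(ems_example,

	TP_PROTO(int cpu, unsigned long util),

	TP_ARGS(cpu, util),

	TP_STRUCT__entry(
		__field(int,		cpu)
		__field(unsigned long,	util)
	),

	TP_fast_assign(
		__entry->cpu  = cpu;
		__entry->util = util;
	),

	TP_printk("cpu=%d util=%lu", __entry->cpu, __entry->util)
);

#endif /* _TRACE_EMS_H */

/* This must stay outside the include guard. */
#include <trace/define_trace.h>

Exactly one .c file defines CREATE_TRACE_POINTS before including such a header to instantiate the event; every other file just includes it and calls trace_ems_example(cpu, util). Renaming the TRACE_EVENT() name without updating those calls breaks the build, which is why all three files change together in this patch.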
diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h
index 866b1843e236754f240835171930372c5749fe79..0cf72f83d1ae5c00120a7ff06a8861c00d313fd8 100644
--- a/include/trace/events/ems.h
+++ b/include/trace/events/ems.h
@@ -391,7 +391,7 @@ TRACE_EVENT(ehmp_find_best_target_cpu,
 /*
  * Tracepoint for ontime migration
  */
-TRACE_EVENT(ehmp_ontime_migration,
+TRACE_EVENT(ems_ontime_migration,
 
        TP_PROTO(struct task_struct *p, unsigned long load,
                int src_cpu, int dst_cpu, int boost_migration),
@@ -424,7 +424,7 @@ TRACE_EVENT(ehmp_ontime_migration,
 /*
  * Tracepoint for accounting ontime load averages for tasks.
  */
-TRACE_EVENT(ehmp_ontime_new_entity_load,
+TRACE_EVENT(ems_ontime_new_entity_load,
 
        TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),
 
@@ -456,7 +456,7 @@ TRACE_EVENT(ehmp_ontime_new_entity_load,
 /*
  * Tracepoint for accounting ontime load averages for tasks.
  */
-TRACE_EVENT(ehmp_ontime_load_avg_task,
+TRACE_EVENT(ems_ontime_load_avg_task,
 
        TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),
 
@@ -484,7 +484,7 @@ TRACE_EVENT(ehmp_ontime_load_avg_task,
                  (u64)__entry->load_sum, __entry->ontime_flag)
 );
 
-TRACE_EVENT(ehmp_ontime_check_migrate,
+TRACE_EVENT(ems_ontime_check_migrate,
 
        TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),
 
@@ -511,7 +511,7 @@ TRACE_EVENT(ehmp_ontime_check_migrate,
                __entry->migrate, __entry->label)
 );
 
-TRACE_EVENT(ehmp_ontime_task_wakeup,
+TRACE_EVENT(ems_ontime_task_wakeup,
 
        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
 
@@ -538,7 +538,7 @@ TRACE_EVENT(ehmp_ontime_task_wakeup,
                __entry->dst_cpu, __entry->label)
 );
 
-TRACE_EVENT(ehmp_lbt_overutilized,
+TRACE_EVENT(ems_lbt_overutilized,
 
        TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
 
diff --git a/kernel/sched/ems/lbt.c b/kernel/sched/ems/lbt.c
index a2f65f0c1afc8cfabe4e67c2bb5018012fb24cfa..1c4a55443a3f08a894da115bfffcc7f45dde8ae2 100644
--- a/kernel/sched/ems/lbt.c
+++ b/kernel/sched/ems/lbt.c
@@ -105,7 +105,7 @@ bool lbt_overutilized(int cpu, int level)
        overutilized = (cpu_util(cpu) > ou[level].capacity) ? true : false;
 
        if (overutilized)
-               trace_ehmp_lbt_overutilized(cpu, level, cpu_util(cpu),
+               trace_ems_lbt_overutilized(cpu, level, cpu_util(cpu),
                                ou[level].capacity, overutilized);
 
        return overutilized;
diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index 5a4c93e7832f84862640121a61d509a351a7c8e9..ffa5ae7f9a1f2ced517ec86e440a74275b91cd00 100644
--- a/kernel/sched/ems/ontime.c
+++ b/kernel/sched/ems/ontime.c
@@ -313,7 +313,7 @@ static int ontime_migration_cpu_stop(void *data)
                rcu_read_unlock();
                double_unlock_balance(src_rq, dst_rq);
 
-               trace_ehmp_ontime_migration(p, ontime_of(p)->avg.load_avg,
+               trace_ems_ontime_migration(p, ontime_of(p)->avg.load_avg,
                                        src_cpu, dst_cpu, boost_migration);
                goto success_unlock;
        }
@@ -365,7 +365,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
 /****************************************************************/
 void ontime_trace_task_info(struct task_struct *p)
 {
-       trace_ehmp_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
+       trace_ems_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
 }
 
 DEFINE_PER_CPU(struct cpu_stop_work, ontime_migration_work);
@@ -497,7 +497,7 @@ int ontime_task_wakeup(struct task_struct *p)
                target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
 
                if (cpu_selected(target_cpu)) {
-                       trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+                       trace_ems_ontime_task_wakeup(p, task_cpu(p),
                                        target_cpu, "up ontime");
                        goto ontime_up;
                }
@@ -520,7 +520,7 @@ int ontime_task_wakeup(struct task_struct *p)
 
                if (delta > get_min_residency(ontime_task_cpu(p)) &&
                                ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
-                       trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1,
+                       trace_ems_ontime_task_wakeup(p, task_cpu(p), -1,
                                        "release ontime");
                        goto ontime_out;
                }
@@ -532,12 +532,12 @@ int ontime_task_wakeup(struct task_struct *p)
                target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
 
                if (cpu_selected(target_cpu)) {
-                       trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+                       trace_ems_ontime_task_wakeup(p, task_cpu(p),
                                        target_cpu, "stay ontime");
                        goto ontime_stay;
                }
 
-               trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
+               trace_ems_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
                goto ontime_out;
        }
 
@@ -560,22 +560,22 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
        u64 delta;
 
        if (ontime_flag(p) & NOT_ONTIME) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "not ontime");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "not ontime");
                return true;
        }
 
        if (ontime_flag(p) & ONTIME_MIGRATING) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "migrating");
+               trace_ems_ontime_check_migrate(p, dst_cpu, false, "migrating");
                return false;
        }
 
        if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(ontime_task_cpu(p)))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
                return true;
        }
 
        if (capacity_orig_of(dst_cpu) > capacity_orig_of(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
                return true;
        }
 
@@ -586,21 +586,21 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
        delta = cpu_rq(0)->clock_task - ontime_migration_time(p);
        delta = delta >> 10;
        if (delta <= get_min_residency(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "min residency");
+               trace_ems_ontime_check_migrate(p, dst_cpu, false, "min residency");
                return false;
        }
 
        if (cpu_rq(task_cpu(p))->nr_running > 1) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "big is busy");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "big is busy");
                goto release;
        }
 
        if (ontime_load_avg(p) >= get_down_threshold(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "heavy task");
+               trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");
                return false;
        }
 
-       trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
+       trace_ems_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
 release:
        exclude_ontime_task(p);
 
@@ -665,7 +665,7 @@ void ontime_new_entity_load(struct task_struct *parent, struct sched_entity *se)
        ontime->avg.period_contrib = 1023;
        ontime->flags = NOT_ONTIME;
 
-       trace_ehmp_ontime_new_entity_load(task_of(se), &ontime->avg);
+       trace_ems_ontime_new_entity_load(task_of(se), &ontime->avg);
 }
 
 /****************************************************************/
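A side note on userspace visibility (an observation, not part of the patch): trace event names are exported through tracefs, so any ftrace or perf tooling that enabled these events by name, for example via events/<system>/ehmp_ontime_migration/enable, has to switch to the new ems_* names after this change.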