From: Daeyeong Lee
Date: Fri, 6 Apr 2018 06:47:37 +0000 (+0900)
Subject: [COMMON] sched: ems: Modify trace log name of EMS feature
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=ec3d88f773a5c2cd81f6d3354fc351161618526f;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[COMMON] sched: ems: Modify trace log name of EMS feature

Change-Id: I8641050a38bccaa26bac3016e52c10a223e14353
Signed-off-by: Daeyeong Lee
---

diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h
index 866b1843e236..0cf72f83d1ae 100644
--- a/include/trace/events/ems.h
+++ b/include/trace/events/ems.h
@@ -391,7 +391,7 @@ TRACE_EVENT(ehmp_find_best_target_cpu,
 /*
  * Tracepoint for ontime migration
  */
-TRACE_EVENT(ehmp_ontime_migration,
+TRACE_EVENT(ems_ontime_migration,

        TP_PROTO(struct task_struct *p, unsigned long load,
                int src_cpu, int dst_cpu, int boost_migration),
@@ -424,7 +424,7 @@ TRACE_EVENT(ehmp_ontime_migration,
 /*
  * Tracepoint for accounting ontime load averages for tasks.
  */
-TRACE_EVENT(ehmp_ontime_new_entity_load,
+TRACE_EVENT(ems_ontime_new_entity_load,

        TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),

@@ -456,7 +456,7 @@ TRACE_EVENT(ehmp_ontime_new_entity_load,
 /*
  * Tracepoint for accounting ontime load averages for tasks.
  */
-TRACE_EVENT(ehmp_ontime_load_avg_task,
+TRACE_EVENT(ems_ontime_load_avg_task,

        TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),

@@ -484,7 +484,7 @@ TRACE_EVENT(ehmp_ontime_load_avg_task,
                  (u64)__entry->load_sum, __entry->ontime_flag)
 );

-TRACE_EVENT(ehmp_ontime_check_migrate,
+TRACE_EVENT(ems_ontime_check_migrate,

        TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),

@@ -511,7 +511,7 @@ TRACE_EVENT(ehmp_ontime_check_migrate,
                __entry->migrate, __entry->label)
 );

-TRACE_EVENT(ehmp_ontime_task_wakeup,
+TRACE_EVENT(ems_ontime_task_wakeup,

        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),

@@ -538,7 +538,7 @@ TRACE_EVENT(ehmp_ontime_task_wakeup,
                __entry->dst_cpu, __entry->label)
 );

-TRACE_EVENT(ehmp_lbt_overutilized,
+TRACE_EVENT(ems_lbt_overutilized,

        TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),

diff --git a/kernel/sched/ems/lbt.c b/kernel/sched/ems/lbt.c
index a2f65f0c1afc..1c4a55443a3f 100644
--- a/kernel/sched/ems/lbt.c
+++ b/kernel/sched/ems/lbt.c
@@ -105,7 +105,7 @@ bool lbt_overutilized(int cpu, int level)
        overutilized = (cpu_util(cpu) > ou[level].capacity) ? true : false;

        if (overutilized)
-               trace_ehmp_lbt_overutilized(cpu, level, cpu_util(cpu),
+               trace_ems_lbt_overutilized(cpu, level, cpu_util(cpu),
                                ou[level].capacity, overutilized);

        return overutilized;
diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index 5a4c93e7832f..ffa5ae7f9a1f 100644
--- a/kernel/sched/ems/ontime.c
+++ b/kernel/sched/ems/ontime.c
@@ -313,7 +313,7 @@ static int ontime_migration_cpu_stop(void *data)
                rcu_read_unlock();
                double_unlock_balance(src_rq, dst_rq);

-               trace_ehmp_ontime_migration(p, ontime_of(p)->avg.load_avg,
+               trace_ems_ontime_migration(p, ontime_of(p)->avg.load_avg,
                                        src_cpu, dst_cpu, boost_migration);
                goto success_unlock;
        }
@@ -365,7 +365,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
 /****************************************************************/
 void ontime_trace_task_info(struct task_struct *p)
 {
-       trace_ehmp_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
+       trace_ems_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
 }

 DEFINE_PER_CPU(struct cpu_stop_work, ontime_migration_work);
@@ -497,7 +497,7 @@ int ontime_task_wakeup(struct task_struct *p)
                target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));

                if (cpu_selected(target_cpu)) {
-                       trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+                       trace_ems_ontime_task_wakeup(p, task_cpu(p),
                                        target_cpu, "up ontime");
                        goto ontime_up;
                }
@@ -520,7 +520,7 @@ int ontime_task_wakeup(struct task_struct *p)

        if (delta > get_min_residency(ontime_task_cpu(p)) &&
                        ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1,
+               trace_ems_ontime_task_wakeup(p, task_cpu(p), -1,
                                "release ontime");
                goto ontime_out;
        }
@@ -532,12 +532,12 @@ int ontime_task_wakeup(struct task_struct *p)
        target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));

        if (cpu_selected(target_cpu)) {
-               trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+               trace_ems_ontime_task_wakeup(p, task_cpu(p),
                                target_cpu, "stay ontime");
                goto ontime_stay;
        }

-       trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
+       trace_ems_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
        goto ontime_out;
 }
@@ -560,22 +560,22 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
        u64 delta;

        if (ontime_flag(p) & NOT_ONTIME) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "not ontime");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "not ontime");
                return true;
        }

        if (ontime_flag(p) & ONTIME_MIGRATING) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "migrating");
+               trace_ems_ontime_check_migrate(p, dst_cpu, false, "migrating");
                return false;
        }

        if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(ontime_task_cpu(p)))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
                return true;
        }

        if (capacity_orig_of(dst_cpu) > capacity_orig_of(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
                return true;
        }

@@ -586,21 +586,21 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
        delta = cpu_rq(0)->clock_task - ontime_migration_time(p);
        delta = delta >> 10;
        if (delta <= get_min_residency(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "min residency");
+               trace_ems_ontime_check_migrate(p, dst_cpu, false, "min residency");
                return false;
        }

        if (cpu_rq(task_cpu(p))->nr_running > 1) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "big is busy");
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "big is busy");
                goto release;
        }

        if (ontime_load_avg(p) >= get_down_threshold(ontime_task_cpu(p))) {
-               trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "heavy task");
+               trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");
                return false;
        }

-       trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
+       trace_ems_ontime_check_migrate(p, dst_cpu, true, "ontime_release");

 release:
        exclude_ontime_task(p);
@@ -665,7 +665,7 @@ void ontime_new_entity_load(struct task_struct *parent, struct sched_entity *se)
        ontime->avg.period_contrib = 1023;
        ontime->flags = NOT_ONTIME;

-       trace_ehmp_ontime_new_entity_load(task_of(se), &ontime->avg);
+       trace_ems_ontime_new_entity_load(task_of(se), &ontime->avg);
 }

 /****************************************************************/
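
For reference: TRACE_EVENT(name, ...) generates a static inline trace_<name>() helper for each event, which is why the call sites in lbt.c and ontime.c must be renamed in the same commit as the definitions in include/trace/events/ems.h. A minimal sketch of a caller after the rename follows; example_report_overutil() is a hypothetical function for illustration, not code from this patch:

#include <trace/events/ems.h>

/*
 * Hypothetical caller, illustration only: the TRACE_EVENT() rename
 * changes the generated helper, so this call would no longer compile
 * under the old trace_ehmp_lbt_overutilized() name.
 */
static void example_report_overutil(int cpu, int level,
                                    unsigned long util, unsigned long capacity)
{
        if (util > capacity)
                /* Was trace_ehmp_lbt_overutilized() before this commit. */
                trace_ems_lbt_overutilized(cpu, level, util, capacity, true);
}

Assuming the header defines TRACE_SYSTEM as ems, the renamed events would then appear under /sys/kernel/debug/tracing/events/ems/ once enabled, rather than under the old ehmp_* names.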