/*
* Tracepoint for ontime migration
*/
-TRACE_EVENT(ehmp_ontime_migration,
+TRACE_EVENT(ems_ontime_migration,
TP_PROTO(struct task_struct *p, unsigned long load,
int src_cpu, int dst_cpu, int boost_migration),
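/*
 * Illustrative sketch, not part of this patch: a renamed event keeps the
 * usual TRACE_EVENT() skeleton. Only the TP_PROTO() arguments come from
 * the patch; the entry layout, assignments and printk format below are
 * assumptions made for the example, not the original definition.
 */
TRACE_EVENT(ems_ontime_migration,

	TP_PROTO(struct task_struct *p, unsigned long load,
		int src_cpu, int dst_cpu, int boost_migration),

	TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration),

	TP_STRUCT__entry(
		/* field names assumed for the sketch */
		__array(	char,		comm,	TASK_COMM_LEN	)
		__field(	pid_t,		pid			)
		__field(	unsigned long,	load			)
		__field(	int,		src_cpu			)
		__field(	int,		dst_cpu			)
		__field(	int,		boost			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->load		= load;
		__entry->src_cpu	= src_cpu;
		__entry->dst_cpu	= dst_cpu;
		__entry->boost		= boost_migration;
	),

	TP_printk("comm=%s pid=%d load=%lu src_cpu=%d dst_cpu=%d boost_migration=%d",
		__entry->comm, __entry->pid, __entry->load,
		__entry->src_cpu, __entry->dst_cpu, __entry->boost)
);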
/*
* Tracepoint for the initial ontime load of a new entity.
*/
-TRACE_EVENT(ehmp_ontime_new_entity_load,
+TRACE_EVENT(ems_ontime_new_entity_load,
TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),
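/*
 * Context, not part of this patch: struct ontime_avg carries the
 * per-entity ontime PELT state passed to the events above and below. The
 * layout here is only inferred from the members this excerpt touches
 * (load_sum, load_avg, period_contrib); the real structure may hold
 * additional fields.
 */
struct ontime_avg {
	u64		load_sum;
	u32		period_contrib;
	unsigned long	load_avg;
};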
/*
* Tracepoint for accounting ontime load averages for tasks.
*/
-TRACE_EVENT(ehmp_ontime_load_avg_task,
+TRACE_EVENT(ems_ontime_load_avg_task,
TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),
(u64)__entry->load_sum, __entry->ontime_flag)
);
-TRACE_EVENT(ehmp_ontime_check_migrate,
+TRACE_EVENT(ems_ontime_check_migrate,
TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),
__entry->migrate, __entry->label)
);
-TRACE_EVENT(ehmp_ontime_task_wakeup,
+TRACE_EVENT(ems_ontime_task_wakeup,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
__entry->dst_cpu, __entry->label)
);
-TRACE_EVENT(ehmp_lbt_overutilized,
+TRACE_EVENT(ems_lbt_overutilized,
TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
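/*
 * Context, not part of this patch: the trace_ems_*() callers updated below
 * are not hand-written helpers; they are generated from the event names
 * above by the one compilation unit that defines CREATE_TRACE_POINTS
 * before including the events header, roughly:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/ems.h>	(header path is an assumption)
 *
 * Renaming an event therefore renames both its trace_<event>() wrapper and
 * its tracefs directory (events/<system>/<event>), so every kernel call
 * site and any userspace tooling enabling the old ehmp_* names must follow
 * the rename.
 */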
rcu_read_unlock();
double_unlock_balance(src_rq, dst_rq);
- trace_ehmp_ontime_migration(p, ontime_of(p)->avg.load_avg,
+ trace_ems_ontime_migration(p, ontime_of(p)->avg.load_avg,
src_cpu, dst_cpu, boost_migration);
goto success_unlock;
}
/****************************************************************/
void ontime_trace_task_info(struct task_struct *p)
{
- trace_ehmp_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
+ trace_ems_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
}
DEFINE_PER_CPU(struct cpu_stop_work, ontime_migration_work);
target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
if (cpu_selected(target_cpu)) {
- trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+ trace_ems_ontime_task_wakeup(p, task_cpu(p),
target_cpu, "up ontime");
goto ontime_up;
}
if (delta > get_min_residency(ontime_task_cpu(p)) &&
ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
- trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1,
+ trace_ems_ontime_task_wakeup(p, task_cpu(p), -1,
"release ontime");
goto ontime_out;
}
target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
if (cpu_selected(target_cpu)) {
- trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+ trace_ems_ontime_task_wakeup(p, task_cpu(p),
target_cpu, "stay ontime");
goto ontime_stay;
}
- trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
+ trace_ems_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
goto ontime_out;
}
u64 delta;
if (ontime_flag(p) & NOT_ONTIME) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "not ontime");
+ trace_ems_ontime_check_migrate(p, dst_cpu, true, "not ontime");
return true;
}
if (ontime_flag(p) & ONTIME_MIGRATING) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "migrating");
+ trace_ems_ontime_check_migrate(p, dst_cpu, false, "migrating");
return false;
}
if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(ontime_task_cpu(p)))) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
+ trace_ems_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
return true;
}
if (capacity_orig_of(dst_cpu) > capacity_orig_of(ontime_task_cpu(p))) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
+ trace_ems_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
return true;
}
delta = cpu_rq(0)->clock_task - ontime_migration_time(p);
delta = delta >> 10;
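/*
 * clock_task is in nanoseconds; shifting right by 10 divides by 1024 as a
 * cheap approximation of a ns-to-us conversion, so the residency check
 * below compares against get_min_residency() in (roughly) microseconds.
 */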
if (delta <= get_min_residency(ontime_task_cpu(p))) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "min residency");
+ trace_ems_ontime_check_migrate(p, dst_cpu, false, "min residency");
return false;
}
if (cpu_rq(task_cpu(p))->nr_running > 1) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "big is busy");
+ trace_ems_ontime_check_migrate(p, dst_cpu, true, "big is busy");
goto release;
}
if (ontime_load_avg(p) >= get_down_threshold(ontime_task_cpu(p))) {
- trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "heavy task");
+ trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");
return false;
}
- trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
+ trace_ems_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
release:
exclude_ontime_task(p);
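/*
 * Starting period_contrib just short of a full 1024us PELT period mirrors
 * init_entity_runnable_average() in fair.c: the first update after enqueue
 * completes the period almost immediately, so the new entity's ontime load
 * is folded in right away.
 */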
ontime->avg.period_contrib = 1023;
ontime->flags = NOT_ONTIME;
- trace_ehmp_ontime_new_entity_load(task_of(se), &ontime->avg);
+ trace_ems_ontime_new_entity_load(task_of(se), &ontime->avg);
}
/****************************************************************/