__entry->migrate, __entry->label)
);
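+/*
+ * Records an ontime migration decision at task wakeup: the task, its
+ * current cpu, the selected destination cpu (-1 when none was chosen),
+ * and a short reason label.
+ */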
+TRACE_EVENT(ehmp_ontime_task_wakeup,
+
+ TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
+
+ TP_ARGS(tsk, src_cpu, dst_cpu, label),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, src_cpu )
+ __field( int, dst_cpu )
+ __array( char, label, 64 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->src_cpu = src_cpu;
+ __entry->dst_cpu = dst_cpu;
+ strlcpy(__entry->label, label, sizeof(__entry->label));
+ ),
+
+ TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s",
+ __entry->comm, __entry->pid, __entry->src_cpu,
+ __entry->dst_cpu, __entry->label)
+);
+
TRACE_EVENT(ehmp_lbt_overutilized,
TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
- if (cpu_selected(target_cpu))
+ if (cpu_selected(target_cpu)) {
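+ /* A target cpu was found; record the up migration. */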
+ trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+ target_cpu, "up ontime");
goto ontime_up;
+ }
}
/*
delta = delta >> 10;
if (delta > get_min_residency(ontime_task_cpu(p)) &&
- ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p)))
+ ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
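+ /* Minimum residency met and load below the down threshold: release the task. */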
+ trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1,
+ "release ontime");
goto ontime_out;
+ }
/*
* If there is a possible cpu to stay ontime, task will wake up at this cpu.
cpumask_copy(&target_mask, cpu_coregroup_mask(ontime_task_cpu(p)));
target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
- if (cpu_selected(target_cpu))
+ if (cpu_selected(target_cpu)) {
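+ /* A cpu in the same coregroup can keep the task ontime. */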
+ trace_ehmp_ontime_task_wakeup(p, task_cpu(p),
+ target_cpu, "stay ontime");
goto ontime_stay;
+ }
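+ /* No cpu can keep the task ontime; it leaves ontime management. */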
+ trace_ehmp_ontime_task_wakeup(p, task_cpu(p), -1, "banished");
goto ontime_out;
}
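
A quick way to exercise the new event (a sketch; it assumes tracefs is
mounted at /sys/kernel/tracing and that this header declares TRACE_SYSTEM
as ehmp, which determines the events/ subdirectory):

    # enable the wakeup tracepoint and stream decisions as they happen
    echo 1 > /sys/kernel/tracing/events/ehmp/ehmp_ontime_task_wakeup/enable
    cat /sys/kernel/tracing/trace_pipe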