/*
* Tracepoint for prefer idle
*/
+TRACE_EVENT(ems_prefer_idle,
+
+ TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu,
+ unsigned long capacity_orig, unsigned long task_util,
+ unsigned long new_util, int idle),
+
+ TP_ARGS(p, orig_cpu, target_cpu, capacity_orig, task_util, new_util, idle),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, orig_cpu )
+ __field( int, target_cpu )
+ __field( unsigned long, capacity_orig )
+ __field( unsigned long, task_util )
+ __field( unsigned long, new_util )
+ __field( int, idle )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->orig_cpu = orig_cpu;
+ __entry->target_cpu = target_cpu;
+ __entry->capacity_orig = capacity_orig;
+ __entry->task_util = task_util;
+ __entry->new_util = new_util;
+ __entry->idle = idle;
+ ),
+
+ TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d cap_org=%lu task_util=%lu new_util=%lu idle=%d",
+ __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu,
+ __entry->capacity_orig, __entry->task_util, __entry->new_util, __entry->idle)
+);
+
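+/*
+ * Tracepoint for the result of prefer-idle cpu selection
+ */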
+TRACE_EVENT(ems_prefer_idle_cpu_select,
+
+ TP_PROTO(struct task_struct *p, int cpu, char *state),
+
+ TP_ARGS(p, cpu, state),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __array( char, state, 30 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->cpu = cpu;
+ memcpy(__entry->state, state, 30);
+ ),
+
+ TP_printk("comm=%s pid=%d target_cpu=%d state=%s",
+ __entry->comm, __entry->pid, __entry->cpu, __entry->state)
+);
+
TRACE_EVENT(ehmp_prefer_idle,
TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu,
*/
#include <linux/sched.h>
+#include <trace/events/ems.h>
+
+#include "ems.h"
+#include "../sched.h"
+
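+/* PELT utilization of @p (util_avg of its sched entity) */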
+static inline unsigned long task_util(struct task_struct *p)
+{
+ return p->se.avg.util_avg;
+}
+
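+/* last_update_time == 0 means @p is mid-migration or newly forked */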
+static inline int check_migration_task(struct task_struct *p)
+{
+ return !p->se.avg.last_update_time;
+}
+
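+/*
+ * Utilization of @cpu with the contribution of @p removed, as if @p
+ * had already been migrated away; clamped to the cpu's original capacity.
+ */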
+static inline unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+{
+ unsigned long util, capacity;
+
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || check_migration_task(p))
+ return cpu_util(cpu);
+
+ capacity = capacity_orig_of(cpu);
+ util = max_t(long, cpu_util(cpu) - task_util(p), 0);
+
+ return (util >= capacity) ? capacity : util;
+}
/**********************************************************************
* Prefer Perf *
/**********************************************************************
* Prefer Idle *
**********************************************************************/
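+/*
+ * Track the idle cpu with the lowest expected utilization.
+ * Returns true if @cpu becomes the new candidate.
+ */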
+static bool mark_lowest_idle_util_cpu(int cpu, unsigned long new_util,
+ int *lowest_idle_util_cpu, unsigned long *lowest_idle_util)
+{
+ if (!idle_cpu(cpu))
+ return false;
+
+ if (new_util >= *lowest_idle_util)
+ return false;
+
+ *lowest_idle_util = new_util;
+ *lowest_idle_util_cpu = cpu;
+
+ return true;
+}
+
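+/*
+ * Track the cpu with the lowest expected utilization among cpus whose
+ * original capacity does not exceed the smallest capacity seen so far.
+ * Returns true if @cpu becomes the new candidate.
+ */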
+static bool mark_lowest_util_cpu(int cpu, unsigned long new_util,
+ int *lowest_util_cpu, unsigned long *lowest_util,
+ unsigned long *target_capacity)
+{
+ if (capacity_orig_of(cpu) > *target_capacity)
+ return false;
+
+ if (new_util >= *lowest_util)
+ return false;
+
+ *lowest_util = new_util;
+ *lowest_util_cpu = cpu;
+ *target_capacity = capacity_orig_of(cpu);
+
+ return true;
+}
+
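+/*
+ * Scan each coregroup in turn. Within a coregroup, prefer the idle cpu
+ * with the lowest expected utilization, then fall back to the lowest-
+ * utilized cpu that can still fit the task. The first coregroup that
+ * yields a candidate wins; otherwise the task's previous cpu is kept.
+ */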
+static int select_idle_cpu(struct task_struct *p)
+{
+ unsigned long lowest_idle_util = ULONG_MAX;
+ unsigned long lowest_util = ULONG_MAX;
+ unsigned long target_capacity = ULONG_MAX;
+ int lowest_idle_util_cpu = -1;
+ int lowest_util_cpu = -1;
+ int target_cpu = -1;
+ int cpu;
+ int i;
+ char state[30] = "prev_cpu";
+
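+ /* Visit each coregroup once via its first cpu */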
+ for_each_possible_cpu(cpu) {
+ if (cpu != cpumask_first(cpu_coregroup_mask(cpu)))
+ continue;
+
+ for_each_cpu_and(i, tsk_cpus_allowed(p), cpu_coregroup_mask(cpu)) {
+ unsigned long capacity_orig = capacity_orig_of(i);
+ unsigned long new_util, wake_util;
+
+ if (!cpu_active(i))
+ continue;
+
+ wake_util = cpu_util_wake(i, p);
+ new_util = wake_util + task_util(p);
+
+ trace_ems_prefer_idle(p, task_cpu(p), i, capacity_orig, task_util(p),
+ new_util, idle_cpu(i));
+
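+ /* Skip cpus that cannot fit the task's expected utilization */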
+ if (new_util > capacity_orig)
+ continue;
+
+ /* Priority #1 : idle cpu with lowest util */
+ if (mark_lowest_idle_util_cpu(i, new_util,
+ &lowest_idle_util_cpu, &lowest_idle_util))
+ continue;
+
+ /* Priority #2 : active cpu with lowest util */
+ mark_lowest_util_cpu(i, new_util,
+ &lowest_util_cpu, &lowest_util, &target_capacity);
+ }
+
+ if (cpu_selected(lowest_idle_util_cpu)) {
+ strcpy(state, "lowest_idle_util");
+ target_cpu = lowest_idle_util_cpu;
+ break;
+ }
+
+ if (cpu_selected(lowest_util_cpu)) {
+ strcpy(state, "lowest_util");
+ target_cpu = lowest_util_cpu;
+ break;
+ }
+ }
+
+ if (!cpu_selected(target_cpu))
+ target_cpu = task_cpu(p);
+
+ trace_ems_prefer_idle_cpu_select(p, target_cpu, state);
+
+ return target_cpu;
+}
+
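+/*
+ * Entry point for prefer-idle placement: only tasks whose schedtune
+ * group sets prefer_idle take this path; everyone else returns -1 so
+ * the caller falls back to the default selection.
+ */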
int prefer_idle_cpu(struct task_struct *p)
{
- return -1;
+ if (schedtune_prefer_idle(p) <= 0)
+ return -1;
+
+ return select_idle_cpu(p);
}
/**********************************************************************