__entry->prev_energy, __entry->best_energy, __entry->backup_energy)
);
+/*
+ * Tracepoint for proper cpu selection: records the waking task
+ * (comm/pid), the cpu chosen by select_proper_cpu() (-1 if none
+ * fit), and the lowest post-wakeup utilization found (ULONG_MAX
+ * when no candidate was found).
+ */
+TRACE_EVENT(ems_select_proper_cpu,
+
+ TP_PROTO(struct task_struct *p, int best_cpu, unsigned long min_util),
+
+ TP_ARGS(p, best_cpu, min_util),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, best_cpu )
+ __field( unsigned long, min_util )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->best_cpu = best_cpu;
+ __entry->min_util = min_util;
+ ),
+
+ TP_printk("comm=%s pid=%d best_cpu=%d min_util=%lu",
+ __entry->comm, __entry->pid, __entry->best_cpu, __entry->min_util)
+);
+
/*
* Tracepoint for wakeup balance
*/
return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
}
+/*
+ * select_proper_cpu - last-resort cpu selection for a waking task.
+ * @p:        waking task
+ * @prev_cpu: cpu the task last ran on; returned when no candidate fits
+ *
+ * Visits each coregroup once and, among the cpus @p is allowed on,
+ * picks the cpu with the lowest post-wakeup utilization that neither
+ * exceeds its original capacity nor would become overutilized per the
+ * LBT criteria.
+ *
+ * NOTE(review): relies on for_each_possible_cpu() visiting coregroups
+ * in ascending-capacity order so that the first coregroup with a
+ * candidate is the smallest-capacity one -- confirm against the
+ * platform topology setup.
+ */
-static int select_proper_cpu(struct task_struct *p)
+static int select_proper_cpu(struct task_struct *p, int prev_cpu)
{
- return -1;
+ int cpu;
+ unsigned long best_min_util = ULONG_MAX;
+ int best_cpu = -1;
+
+ for_each_possible_cpu(cpu) {
+ int i;
+
+ /* visit each coregroup only once (iterate on its first cpu) */
+ if (cpu != cpumask_first(cpu_coregroup_mask(cpu)))
+ continue;
+
+ /* skip if task cannot be assigned to any cpu of this coregroup */
+ if (!cpumask_intersects(&p->cpus_allowed, cpu_coregroup_mask(cpu)))
+ continue;
+
+ for_each_cpu_and(i, tsk_cpus_allowed(p), cpu_coregroup_mask(cpu)) {
+ unsigned long capacity_orig = capacity_orig_of(i);
+ unsigned long wake_util, new_util;
+
+ /* utilization of cpu i with @p's own contribution added */
+ wake_util = cpu_util_wake(i, p);
+ new_util = wake_util + task_util(p);
+
+ /* skip cpu that would exceed its original capacity */
+ if (new_util > capacity_orig)
+ continue;
+
+ /*
+ * According to the criteria determined by the LBT(Load
+ * Balance trigger), the cpu that becomes overutilized
+ * when the task is assigned is skipped.
+ */
+ if (lbt_bring_overutilize(i, p))
+ continue;
+
+ /*
+ * Best target) lowest utilization among lowest-cap cpu
+ *
+ * If the sequence reaches this function, the wakeup task
+ * does not require performance and the prev cpu is over-
+ * utilized, so it should do load balancing without
+ * considering energy side. Therefore, it selects cpu
+ * with smallest capacity and the least utilization among
+ * cpu that fits the task.
+ */
+ if (best_min_util < new_util)
+ continue;
+
+ best_min_util = new_util;
+ best_cpu = i;
+ }
+
+ /*
+ * Stop at the first coregroup that yields a candidate;
+ * otherwise fall through and visit the next (higher
+ * capacity) coregroup.
+ */
+ if (cpu_selected(best_cpu))
+ break;
+ }
+
+ trace_ems_select_proper_cpu(p, best_cpu, best_min_util);
+
+ /*
+ * if it fails to find the best cpu, choosing any cpu is meaningless.
+ * Return prev cpu.
+ */
+ return cpu_selected(best_cpu) ? best_cpu : prev_cpu;
}
extern void sync_entity_load_avg(struct sched_entity *se);
/*
* Priority 7 : proper cpu
*/
- target_cpu = select_proper_cpu(p);
+ target_cpu = select_proper_cpu(p, prev_cpu);
if (cpu_selected(target_cpu))
strcpy(state, "proper cpu");