From b057512ba5139be27e3471eb1aac1cfb1cea7450 Mon Sep 17 00:00:00 2001 From: Park Bumgyu Date: Thu, 12 Apr 2018 16:49:30 +0900 Subject: [PATCH] sched: ems: implement proper_cpu exynos_proper_cpu() is called last in exynos wakeup balance. This is called when the task does not require performance, and the prev cpu is overutilized and does not select the energy cpu. Since balancing is necessary, the task is sent to the cpu with the smallest utilization in order to minimize power consumption. Change-Id: I5ef1a3b6d295763e7c4dc0df9f871d7ef05f8c9a Signed-off-by: Park Bumgyu --- include/trace/events/ems.h | 27 +++++++++++++++ kernel/sched/ems/core.c | 71 ++++++++++++++++++++++++++++++++++++-- kernel/sched/ems/energy.c | 2 +- 3 files changed, 96 insertions(+), 4 deletions(-) diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h index 470bf6257a53..7c9535bd0427 100644 --- a/include/trace/events/ems.h +++ b/include/trace/events/ems.h @@ -57,6 +57,33 @@ TRACE_EVENT(ems_select_eco_cpu, __entry->prev_energy, __entry->best_energy, __entry->backup_energy) ); +/* + * Tracepoint for proper cpu selection + */ +TRACE_EVENT(ems_select_proper_cpu, + + TP_PROTO(struct task_struct *p, int best_cpu, unsigned long min_util), + + TP_ARGS(p, best_cpu, min_util), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, best_cpu ) + __field( unsigned long, min_util ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->best_cpu = best_cpu; + __entry->min_util = min_util; + ), + + TP_printk("comm=%s pid=%d best_cpu=%d min_util=%lu", + __entry->comm, __entry->pid, __entry->best_cpu, __entry->min_util) +); + /* * Tracepoint for wakeup balance */ diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c index 0c6514b83ba4..03efea678f83 100644 --- a/kernel/sched/ems/core.c +++ b/kernel/sched/ems/core.c @@ -106,9 +106,74 @@ int exynos_need_active_balance(enum cpu_idle_type idle, 
struct sched_domain *sd, return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2); } -static int select_proper_cpu(struct task_struct *p) +static int select_proper_cpu(struct task_struct *p, int prev_cpu) { - return -1; + int cpu; + unsigned long best_min_util = ULONG_MAX; + int best_cpu = -1; + + for_each_possible_cpu(cpu) { + int i; + + /* visit each coregroup only once */ + if (cpu != cpumask_first(cpu_coregroup_mask(cpu))) + continue; + + /* skip if task cannot be assigned to coregroup */ + if (!cpumask_intersects(&p->cpus_allowed, cpu_coregroup_mask(cpu))) + continue; + + for_each_cpu_and(i, tsk_cpus_allowed(p), cpu_coregroup_mask(cpu)) { + unsigned long capacity_orig = capacity_orig_of(i); + unsigned long wake_util, new_util; + + wake_util = cpu_util_wake(i, p); + new_util = wake_util + task_util(p); + + /* skip over-capacity cpu */ + if (new_util > capacity_orig) + continue; + + /* + * According to the criteria determined by the LBT(Load + * Balance trigger), the cpu that becomes overutilized + * when the task is assigned is skipped. + */ + if (lbt_bring_overutilize(i, p)) + continue; + + /* + * Best target) lowest utilization among lowest-cap cpu + * + * If the sequence reaches this function, the wakeup task + * does not require performance and the prev cpu is over- + * utilized, so it should do load balancing without + * considering energy side. Therefore, it selects cpu + * with smallest capacity and the least utilization among + * cpu that fits the task. + */ + if (best_min_util < new_util) + continue; + + best_min_util = new_util; + best_cpu = i; + } + + /* + * if it fails to find the best cpu in this coregroup, visit next + * coregroup. + */ + if (cpu_selected(best_cpu)) + break; + } + + trace_ems_select_proper_cpu(p, best_cpu, best_min_util); + + /* + * if it fails to find the best cpu, choosing any cpu is meaningless. + * Return prev cpu. + */ + return cpu_selected(best_cpu) ? 
best_cpu : prev_cpu; } extern void sync_entity_load_avg(struct sched_entity *se); @@ -220,7 +285,7 @@ int exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int /* * Priority 7 : proper cpu */ - target_cpu = select_proper_cpu(p); + target_cpu = select_proper_cpu(p, prev_cpu); if (cpu_selected(target_cpu)) strcpy(state, "proper cpu"); diff --git a/kernel/sched/ems/energy.c b/kernel/sched/ems/energy.c index 262a2dff686a..11d9182bb9bb 100644 --- a/kernel/sched/ems/energy.c +++ b/kernel/sched/ems/energy.c @@ -319,7 +319,7 @@ int select_energy_cpu(struct task_struct *p, int prev_cpu, int sd_flag, int sync */ find_eco_target(&eenv); if (eenv.best_cpu < 0 && eenv.backup_cpu < 0) - return prev_cpu; + return -1; return select_eco_cpu(&eenv); } -- 2.20.1