From 5590a04e9581fe22e7cc1c91b52b4830bf01a995 Mon Sep 17 00:00:00 2001
From: Park Bumgyu <bumgyu.park@samsung.com>
Date: Mon, 20 Aug 2018 10:29:10 +0900
Subject: [PATCH] [RAMEN9610-9421][COMMON] sched: ems: support prefer perf
 service

Support prefer-perf services. When a waking task belongs to a schedtune
group with a prefer_perf boost, pick its CPU from the preferred
coregroups described in the device tree, before the normal
wakeup-balance priorities are evaluated. Light tasks (utilization at or
below the configured threshold) are kept in the first preferred
coregroup; heavier tasks may use all preferred coregroups.

Change-Id: Ida3e81c598a22e984839533e62604ffd20c94dc3
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
---
 include/trace/events/ems.h |  26 ++++++
 kernel/sched/ems/core.c    |   6 ++
 kernel/sched/ems/ems.h     |   1 +
 kernel/sched/ems/service.c | 167 ++++++++++++++++++++++++++++++++++++-
 4 files changed, 199 insertions(+), 1 deletion(-)

diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h
index db0723fb607a..0f425ff13cc7 100644
--- a/include/trace/events/ems.h
+++ b/include/trace/events/ems.h
@@ -441,6 +441,32 @@ TRACE_EVENT(ems_manage_band,
 	TP_printk("comm=%s pid=%d band_id=%d event=%s",
 		__entry->comm, __entry->pid, __entry->band_id, __entry->event)
 );
+
+TRACE_EVENT(ems_prefer_perf_service,
+
+	TP_PROTO(struct task_struct *p, unsigned long util, int service_cpu, char *event),
+
+	TP_ARGS(p, util, service_cpu, event),
+
+	TP_STRUCT__entry(
+		__array(	char,		comm,	TASK_COMM_LEN	)
+		__field(	pid_t,		pid			)
+		__field(	unsigned long,	util			)
+		__field(	int,		service_cpu		)
+		__array(	char,		event,	64		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid = p->pid;
+		__entry->util = util;
+		__entry->service_cpu = service_cpu;
+		strlcpy(__entry->event, event, 64);
+	),
+
+	TP_printk("comm=%s pid=%d util=%lu service_cpu=%d event=%s",
+		__entry->comm, __entry->pid, __entry->util, __entry->service_cpu, __entry->event)
+);
 #endif /* _TRACE_EMS_H */

 /* This part must be outside protection */
diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c
index 5e48067f9e64..d066fc0f431f 100644
--- a/kernel/sched/ems/core.c
+++ b/kernel/sched/ems/core.c
@@ -200,6 +200,12 @@ int exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int
 		update_band(p, old_util);
 	}

+	target_cpu = select_service_cpu(p);
+	if (cpu_selected(target_cpu)) {
+		strcpy(state, "service");
+		goto out;
+	}
+
 	/*
 	 * Priority 1 : ontime task
 	 *
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
index ffaf5cdac69c..1a7c86fe021a 100644
--- a/kernel/sched/ems/ems.h
+++ b/kernel/sched/ems/ems.h
@@ -18,6 +18,7 @@

 extern struct kobject *ems_kobj;

+extern int select_service_cpu(struct task_struct *p);
 extern int ontime_task_wakeup(struct task_struct *p, int sync);
 extern int select_perf_cpu(struct task_struct *p);
 extern int global_boosting(struct task_struct *p);
diff --git a/kernel/sched/ems/service.c b/kernel/sched/ems/service.c
index 50d77db1f240..0bec0de9a967 100644
--- a/kernel/sched/ems/service.c
+++ b/kernel/sched/ems/service.c
@@ -6,7 +6,14 @@
  */

 #include <linux/ems.h>
-#include <trace/events/ems.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <trace/events/ems.h>
+
+#include "../sched.h"
+#include "../tune.h"
+#include "ems.h"

 /**********************************************************************
  * Kernel Prefer Perf                                                 *
@@ -28,6 +35,115 @@ void request_kernel_prefer_perf(int grp_idx, int value)
 	atomic_set(&kernel_prefer_perf_req[grp_idx], value);
 }
+
+struct prefer_perf {
+	int boost;
+	unsigned int threshold;
+	unsigned int coregroup_count;
+	struct cpumask *prefer_cpus;
+};
+
+static struct prefer_perf *prefer_perf_services;
+static int prefer_perf_service_count;
+
+static struct prefer_perf *find_prefer_perf(int boost)
+{
+	int i;
+
+	for (i = 0; i < prefer_perf_service_count; i++)
+		if (prefer_perf_services[i].boost == boost)
+			return &prefer_perf_services[i];
+
+	return NULL;
+}
+
+static int
+select_prefer_cpu(struct task_struct *p, int coregroup_count, struct cpumask *prefer_cpus)
+{
+	struct cpumask mask;
+	int coregroup, cpu;
+	unsigned long max_spare_cap = 0;
+	int best_perf_cstate = INT_MAX;
+	int best_perf_cpu = -1;
+	int backup_cpu = -1;
+
+	rcu_read_lock();
+
+	for (coregroup = 0; coregroup < coregroup_count; coregroup++) {
+		cpumask_and(&mask, &prefer_cpus[coregroup], cpu_active_mask);
+		if (cpumask_empty(&mask))
+			continue;
+
+		for_each_cpu_and(cpu, &p->cpus_allowed, &mask) {
+			unsigned long capacity_orig;
+			unsigned long wake_util;
+
+			if (idle_cpu(cpu)) {
+				int idle_idx = idle_get_state_idx(cpu_rq(cpu));
+
+				/* find shallowest idle state cpu */
+				if (idle_idx >= best_perf_cstate)
+					continue;
+
+				/* Keep track of best idle CPU */
+				best_perf_cstate = idle_idx;
+				best_perf_cpu = cpu;
+				continue;
+			}
+
+			capacity_orig = capacity_orig_of(cpu);
+			wake_util = cpu_util_wake(cpu, p);
+			if ((capacity_orig - wake_util) < max_spare_cap)
+				continue;
+
+			max_spare_cap = capacity_orig - wake_util;
+			backup_cpu = cpu;
+		}
+
+		if (cpu_selected(best_perf_cpu))
+			break;
+	}
+
+	rcu_read_unlock();
+
+	if (best_perf_cpu == -1)
+		return backup_cpu;
+
+	return best_perf_cpu;
+}
+
+int select_service_cpu(struct task_struct *p)
+{
+	struct prefer_perf *pp;
+	int boost, service_cpu;
+	unsigned long util;
+	char state[30];
+
+	if (!prefer_perf_services)
+		return -1;
+
+	boost = schedtune_prefer_perf(p);
+	if (boost <= 0)
+		return -1;
+
+	pp = find_prefer_perf(boost);
+	if (!pp || !pp->prefer_cpus)
+		return -1;
+
+	util = task_util_est(p);
+	if (util <= pp->threshold) {
+		service_cpu = select_prefer_cpu(p, 1, pp->prefer_cpus);
+		strcpy(state, "light task");
+		goto out;
+	}
+
+	service_cpu = select_prefer_cpu(p, pp->coregroup_count, pp->prefer_cpus);
+	strcpy(state, "heavy task");
+
+out:
+	trace_ems_prefer_perf_service(p, util, service_cpu, state);
+	return service_cpu;
+}

 static ssize_t show_prefer_perf(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
@@ -45,10 +161,59 @@ static ssize_t show_prefer_perf(struct kobject *kobj,
 static struct kobj_attribute prefer_perf_attr =
 __ATTR(kernel_prefer_perf, 0444, show_prefer_perf, NULL);

+static void __init build_prefer_cpus(void)
+{
+	struct device_node *dn, *child;
+	int index = 0;
+
+	dn = of_find_node_by_name(NULL, "ems");
+	dn = of_find_node_by_name(dn, "prefer-perf-service");
+	prefer_perf_service_count = of_get_child_count(dn);
+
+	prefer_perf_services = kcalloc(prefer_perf_service_count,
+				sizeof(struct prefer_perf), GFP_KERNEL);
+	if (!prefer_perf_services)
+		return;
+
+	for_each_child_of_node(dn, child) {
+		const char *mask[NR_CPUS];
+		int i, proplen;
+
+		if (index >= prefer_perf_service_count)
+			return;
+
+		of_property_read_u32(child, "boost",
+				&prefer_perf_services[index].boost);
+
+		of_property_read_u32(child, "light-task-threshold",
+				&prefer_perf_services[index].threshold);
+
+		proplen = of_property_count_strings(child, "prefer-cpus");
+		if (proplen < 0)
+			goto next;
+
+		of_property_read_string_array(child, "prefer-cpus", mask, proplen);
+		prefer_perf_services[index].prefer_cpus = kcalloc(proplen,
+				sizeof(struct cpumask), GFP_KERNEL);
+		if (!prefer_perf_services[index].prefer_cpus)
+			goto next;
+
+		prefer_perf_services[index].coregroup_count = proplen;
+
+		for (i = 0; i < proplen; i++)
+			cpulist_parse(mask[i], &prefer_perf_services[index].prefer_cpus[i]);
+
+next:
+		index++;
+	}
+}
+
 static int __init init_service(void)
 {
 	int ret;

+	build_prefer_cpus();
+
 	ret = sysfs_create_file(ems_kobj, &prefer_perf_attr.attr);
 	if (ret)
 		pr_err("%s: faile to create sysfs file\n", __func__);
-- 
2.20.1
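
Note: build_prefer_cpus() binds each service to device-tree data: an
"ems" node containing a "prefer-perf-service" node, whose children each
carry a "boost" value, a "light-task-threshold", and a "prefer-cpus"
string list with one cpulist per coregroup. A minimal sketch of such a
node follows; the property names come from the parser above, but the
child node name, boost value, threshold, and CPU ranges are illustrative
only:

	ems {
		prefer-perf-service {
			service0 {
				boost = <1>;
				light-task-threshold = <96>;
				/* coregroup 0 first: light tasks stay there */
				prefer-cpus = "4-7", "0-3";
			};
		};
	};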
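Note: select_service_cpu() keys off schedtune_prefer_perf(p), so the
boost value is expected to arrive through the schedtune cgroup
controller. A hypothetical usage sketch, assuming this tree exposes the
per-group knob as schedtune.prefer_perf (the attribute name is not shown
in this patch):

	# route top-app tasks through the boost-1 service defined above
	echo 1 > /dev/stune/top-app/schedtune.prefer_perf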