[RAMEN9610-9421][COMMON] sched: ems: support prefer perf service
author Park Bumgyu <bumgyu.park@samsung.com>
Mon, 20 Aug 2018 01:29:10 +0000 (10:29 +0900)
committer hskang <hs1218.kang@samsung.com>
Thu, 13 Dec 2018 11:41:53 +0000 (20:41 +0900)
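
Support a "prefer perf service" placement path in the EMS wakeup
balancer. When a waking task belongs to a schedtune group with a
positive prefer_perf boost, select_service_cpu() places it on one of
the preferred coregroups configured for that boost level: a light task
(utilization at or below light-task-threshold) is placed within the
first preferred coregroup, while a heavy task searches all preferred
coregroups for the CPU in the shallowest idle state, falling back to
the busy CPU with the most spare capacity. The per-boost service table
(boost value, light-task threshold, preferred cpumasks) is built from
the ems/prefer-perf-service device-tree node at init, and a new
ems_prefer_perf_service trace event records each placement.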
Change-Id: Ida3e81c598a22e984839533e62604ffd20c94dc3
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
include/trace/events/ems.h
kernel/sched/ems/core.c
kernel/sched/ems/ems.h
kernel/sched/ems/service.c

index db0723fb607aa0a0e86ac0ae0031379b84422d0b..0f425ff13cc72ddefbe121c643943d26047e5c41 100644 (file)
@@ -441,6 +441,32 @@ TRACE_EVENT(ems_manage_band,
        TP_printk("comm=%s pid=%d band_id=%d event=%s",
                        __entry->comm, __entry->pid, __entry->band_id, __entry->event)
 );
+
+TRACE_EVENT(ems_prefer_perf_service,
+
+       TP_PROTO(struct task_struct *p, unsigned long util, int service_cpu, char *event),
+
+       TP_ARGS(p, util, service_cpu, event),
+
+       TP_STRUCT__entry(
+               __array( char,          comm,           TASK_COMM_LEN   )
+               __field( pid_t,         pid                             )
+               __field( unsigned long, util                            )
+               __field( int,           service_cpu                     )
+               __array( char,          event,          64              )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid                    = p->pid;
+               __entry->util                   = util;
+               __entry->service_cpu            = service_cpu;
+               strlcpy(__entry->event, event, 64);
+       ),
+
+       TP_printk("comm=%s pid=%d util=%lu service_cpu=%d event=%s",
+                       __entry->comm, __entry->pid, __entry->util, __entry->service_cpu, __entry->event)
+);
 #endif /* _TRACE_EMS_H */
 
 /* This part must be outside protection */
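
With the event enabled, each invocation logs a line following the
TP_printk format above, e.g. (hypothetical task and values):

  ems_prefer_perf_service: comm=RenderThread pid=1234 util=321 service_cpu=6 event=heavy task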
index 5e48067f9e64bed68dddf09ee5aff9764efa700f..d066fc0f431fa50ac63912aedc4632e5453a272e 100644 (file)
@@ -200,6 +200,12 @@ int exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int
                update_band(p, old_util);
        }
 
+       target_cpu = select_service_cpu(p);
+       if (cpu_selected(target_cpu)) {
+               strcpy(state, "service");
+               goto out;
+       }
+
        /*
         * Priority 1 : ontime task
         *
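
Note that the service-CPU check runs ahead of the existing
"Priority 1 : ontime task" step, so prefer-perf service placement takes
precedence over the ontime, perf-cpu and boosting paths that follow in
exynos_wakeup_balance().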
index ffaf5cdac69c9f403c14ff8997a3f7155d5c5b31..1a7c86fe021a0ed9ecd69ab6574dfdbcc733e920 100644 (file)
@@ -18,6 +18,7 @@
 
 extern struct kobject *ems_kobj;
 
+extern int select_service_cpu(struct task_struct *p);
 extern int ontime_task_wakeup(struct task_struct *p, int sync);
 extern int select_perf_cpu(struct task_struct *p);
 extern int global_boosting(struct task_struct *p);
index 50d77db1f24075a1b4c1285b8953d3af62188a37..0bec0de9a967402e96623ed042388129d47a1517 100644 (file)
@@ -6,7 +6,14 @@
  */
 
 #include <linux/kobject.h>
-#include <linux/ems.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/ems_service.h>
+#include <trace/events/ems.h>
+
+#include "../sched.h"
+#include "../tune.h"
+#include "ems.h"
 
 /**********************************************************************
  *                        Kernel Prefer Perf                          *
@@ -28,6 +35,115 @@ void request_kernel_prefer_perf(int grp_idx, int value)
        atomic_set(&kernel_prefer_perf_req[grp_idx], value);
 }
 
+struct prefer_perf {
+       int                     boost;
+       unsigned int            threshold;
+       unsigned int            coregroup_count;
+       struct cpumask          *prefer_cpus;
+};
+
+static struct prefer_perf *prefer_perf_services;
+static int prefer_perf_service_count;
+
+static struct prefer_perf *find_prefer_perf(int boost)
+{
+       int i;
+
+       for (i = 0; i < prefer_perf_service_count; i++)
+               if (prefer_perf_services[i].boost == boost)
+                       return &prefer_perf_services[i];
+
+       return NULL;
+}
+
+static int
+select_prefer_cpu(struct task_struct *p, int coregroup_count, struct cpumask *prefer_cpus)
+{
+       struct cpumask mask;
+       int coregroup, cpu;
+       unsigned long max_spare_cap = 0;
+       int best_perf_cstate = INT_MAX;
+       int best_perf_cpu = -1;
+       int backup_cpu = -1;
+
+       rcu_read_lock();
+
+       for (coregroup = 0; coregroup < coregroup_count; coregroup++) {
+               cpumask_and(&mask, &prefer_cpus[coregroup], cpu_active_mask);
+               if (cpumask_empty(&mask))
+                       continue;
+
+               for_each_cpu_and(cpu, &p->cpus_allowed, &mask) {
+                       unsigned long capacity_orig;
+                       unsigned long wake_util;
+
+                       if (idle_cpu(cpu)) {
+                               int idle_idx = idle_get_state_idx(cpu_rq(cpu));
+
+                               /* find shallowest idle state cpu */
+                               if (idle_idx >= best_perf_cstate)
+                                       continue;
+
+                               /* Keep track of best idle CPU */
+                               best_perf_cstate = idle_idx;
+                               best_perf_cpu = cpu;
+                               continue;
+                       }
+
+                       capacity_orig = capacity_orig_of(cpu);
+                       wake_util = cpu_util_wake(cpu, p);
+                       if ((capacity_orig - wake_util) < max_spare_cap)
+                               continue;
+
+                       max_spare_cap = capacity_orig - wake_util;
+                       backup_cpu = cpu;
+               }
+
+               if (cpu_selected(best_perf_cpu))
+                       break;
+       }
+
+       rcu_read_unlock();
+
+       if (best_perf_cpu == -1)
+               return backup_cpu;
+
+       return best_perf_cpu;
+}
+
+int select_service_cpu(struct task_struct *p)
+{
+       struct prefer_perf *pp;
+       int boost, service_cpu;
+       unsigned long util;
+       char state[30];
+
+       if (!prefer_perf_services)
+               return -1;
+
+       boost = schedtune_prefer_perf(p);
+       if (boost <= 0)
+               return -1;
+
+       pp = find_prefer_perf(boost);
+       if (!pp)
+               return -1;
+
+       util = task_util_est(p);
+       if (util <= pp->threshold) {
+               service_cpu = select_prefer_cpu(p, 1, pp->prefer_cpus);
+               strcpy(state, "light task");
+               goto out;
+       }
+
+       service_cpu = select_prefer_cpu(p, pp->coregroup_count, pp->prefer_cpus);
+       strcpy(state, "heavy task");
+
+out:
+       trace_ems_prefer_perf_service(p, util, service_cpu, state);
+       return service_cpu;
+}
+
 static ssize_t show_prefer_perf(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
 {
@@ -45,10 +161,59 @@ static ssize_t show_prefer_perf(struct kobject *kobj,
 static struct kobj_attribute prefer_perf_attr =
 __ATTR(kernel_prefer_perf, 0444, show_prefer_perf, NULL);
 
+static void __init build_prefer_cpus(void)
+{
+       struct device_node *dn, *child;
+       int index = 0;
+
+       dn = of_find_node_by_name(NULL, "ems");
+       dn = of_find_node_by_name(dn, "prefer-perf-service");
+       prefer_perf_service_count = of_get_child_count(dn);
+
+       prefer_perf_services = kcalloc(prefer_perf_service_count,
+                               sizeof(struct prefer_perf), GFP_KERNEL);
+       if (!prefer_perf_services)
+               return;
+
+       for_each_child_of_node(dn, child) {
+               const char *mask[NR_CPUS];
+               int i, proplen;
+
+               if (index >= prefer_perf_service_count)
+                       return;
+
+               of_property_read_u32(child, "boost",
+                                       &prefer_perf_services[index].boost);
+
+               of_property_read_u32(child, "light-task-threshold",
+                                       &prefer_perf_services[index].threshold);
+
+               proplen = of_property_count_strings(child, "prefer-cpus");
+               if (proplen < 0)
+                       goto next;
+
+               prefer_perf_services[index].coregroup_count = proplen;
+
+               of_property_read_string_array(child, "prefer-cpus", mask, proplen);
+               prefer_perf_services[index].prefer_cpus = kcalloc(proplen,
+                                               sizeof(struct cpumask), GFP_KERNEL);
+               if (!prefer_perf_services[index].prefer_cpus)
+                       goto next;
+
+               for (i = 0; i < proplen; i++)
+                       cpulist_parse(mask[i], &prefer_perf_services[index].prefer_cpus[i]);
+
+next:
+               index++;
+       }
+}
+
 static int __init init_service(void)
 {
        int ret;
 
+       build_prefer_cpus();
+
        ret = sysfs_create_file(ems_kobj, &prefer_perf_attr.attr);
        if (ret)
                pr_err("%s: failed to create sysfs file\n", __func__);
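
For reference, build_prefer_cpus() expects a device-tree layout along the
following lines. This is a minimal sketch: the child node names and the
boost/threshold/cpulist values are illustrative, only the
ems/prefer-perf-service node path and the "boost", "light-task-threshold"
and "prefer-cpus" property names come from the parsing code above.

    ems {
            prefer-perf-service {
                    service0 {
                            /* matched against schedtune_prefer_perf(p) */
                            boost = <1>;
                            /* util at or below this is a "light task" */
                            light-task-threshold = <96>;
                            /* one cpulist string per coregroup */
                            prefer-cpus = "4-7", "0-3";
                    };
                    service1 {
                            boost = <2>;
                            light-task-threshold = <48>;
                            prefer-cpus = "4-7";
                    };
            };
    };

Each "prefer-cpus" string is parsed with cpulist_parse() into one
struct cpumask; the first entry is the only coregroup considered for
light tasks, and the remaining entries extend the search for heavy
tasks.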