#include <linux/sched.h>
#include <linux/tracepoint.h>
+/*
+ * Tracepoint for wakeup balance
+ */
+TRACE_EVENT(ems_wakeup_balance,
+
+ TP_PROTO(struct task_struct *p, int target_cpu, char *state),
+
+ TP_ARGS(p, target_cpu, state),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, target_cpu )
+ __array( char, state, 30 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->target_cpu = target_cpu;
+ memcpy(__entry->state, state, 30);
+ ),
+
+ TP_printk("comm=%s pid=%d target_cpu=%d state=%s",
+ __entry->comm, __entry->pid, __entry->target_cpu, __entry->state)
+);
+
/*
* Tracepoint for selection of boost cpu
*/
--- /dev/null
+/*
+ * Core Exynos Mobile Scheduler
+ *
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd
+ * Park Bumgyu <bumgyu.park@samsung.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ems.h>
+
+#include "ems.h"
+#include "../sched.h"
+
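+/*
+ * Energy-aware cpu selection. Currently a stub: returning -1 means no cpu
+ * is selected and the caller falls through to the next policy.
+ */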
+static int select_energy_cpu(struct task_struct *p)
+{
+ return -1;
+}
+
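+/*
+ * Last-resort cpu selection (priority 7). Currently a stub that selects
+ * no cpu.
+ */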
+static int select_proper_cpu(struct task_struct *p)
+{
+ return -1;
+}
+
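+/* A negative cpu id means no cpu was selected */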
+#define cpu_selected(cpu) ((cpu) >= 0)
+
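+/* fair.c helper that brings the entity's load average up to date */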
+extern void sync_entity_load_avg(struct sched_entity *se);
+
+int exynos_wakeup_balance(struct task_struct *p, int sd_flag, int sync)
+{
+ int target_cpu = -1;
+ char state[30] = "fail";
+
+ /*
+ * A task's utilization was last accumulated before it went to sleep, so
+ * bring it up to date here before deciding which cpu the task should be
+ * assigned to. Newly forked tasks are excluded since they have no
+ * utilization history to sync yet.
+ */
+ if (!(sd_flag & SD_BALANCE_FORK))
+ sync_entity_load_avg(&p->se);
+
+ /*
+ * Priority 1 : ontime task
+ *
+ * When a task whose utilization exceeds the threshold wakes up, it is
+ * classified as an "ontime task" and assigned to a performance cpu.
+ * Conversely, if a heavy task that has been classified as ontime sleeps
+ * for a long time and its utilization becomes small, it is dropped from
+ * ontime and is no longer guaranteed to run on a performance cpu.
+ *
+ * An ontime task is very performance-sensitive because it is usually the
+ * main task of an application, so it gets the highest priority.
+ */
+ target_cpu = ontime_task_wakeup(p);
+ if (cpu_selected(target_cpu)) {
+ strcpy(state, "ontime migration");
+ goto out;
+ }
+
+ /*
+ * Priority 2 : prefer-perf
+ *
+ * Prefer-perf operates on a per-cgroup basis managed by schedtune. When
+ * prefer-perf is set to 1, tasks in the group are preferentially
+ * assigned to a performance cpu.
+ *
+ * It has a high priority because it is turned on temporarily in
+ * scenarios that require responsiveness (touch, app launching).
+ */
+ target_cpu = prefer_perf_cpu(p);
+ if (cpu_selected(target_cpu)) {
+ strcpy(state, "prefer-perf");
+ goto out;
+ }
+
+ /*
+ * Priority 3 : global boosting
+ *
+ * Global boost preferentially assigns every task in the system to a
+ * performance cpu. Unlike prefer-perf, which targets only group tasks,
+ * global boost targets all tasks, so it maximizes performance cpu
+ * utilization.
+ *
+ * Typically, prefer-perf operates on groups that contain UX-related
+ * tasks, such as "top-app" or "foreground", so the major tasks are
+ * already likely to be assigned to a performance cpu. Global boost, on
+ * the other hand, assigns all tasks to the performance cpus, which is
+ * not as effective as prefer-perf. For this reason, global boost has a
+ * lower priority than prefer-perf.
+ */
+ target_cpu = global_boosting(p);
+ if (cpu_selected(target_cpu)) {
+ strcpy(state, "global boosting");
+ goto out;
+ }
+
+ /*
+ * Priority 4 : group balancing
+ */
+ target_cpu = group_balancing(p);
+ if (cpu_selected(target_cpu)) {
+ strcpy(state, "group balancing");
+ goto out;
+ }
+
+ /*
+ * Priority 5 : prefer-idle
+ *
+ * Prefer-idle operates on a per-cgroup basis managed by schedtune. When
+ * prefer-idle is set to 1, tasks in the group are preferentially
+ * assigned to an idle cpu.
+ *
+ * Prefer-idle has a smaller performance impact than the functions above,
+ * so it has a relatively low priority.
+ */
+ target_cpu = prefer_idle_cpu(p);
+ if (cpu_selected(target_cpu)) {
+ strcpy(state, "prefer-idle");
+ goto out;
+ }
+
+ /*
+ * Priority 6 : energy cpu
+ *
+ * An energy-based scheduling scheme: when assigning the task, consult
+ * the energy table and pick the cpu with the lowest power consumption.
+ */
+ target_cpu = select_energy_cpu(p);
+ if (cpu_selected(target_cpu)) {
+ strcpy(state, "energy cpu");
+ goto out;
+ }
+
+ /*
+ * Priority 7 : proper cpu
+ */
+ target_cpu = select_proper_cpu(p);
+ if (cpu_selected(target_cpu))
+ strcpy(state, "proper cpu");
+
+out:
+ trace_ems_wakeup_balance(p, target_cpu, state);
+ return target_cpu;
+}
+
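+/* EMS kobject, exposed as /sys/kernel/ems */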
+struct kobject *ems_kobj;
+
+static int __init init_sysfs(void)
+{
+ ems_kobj = kobject_create_and_add("ems", kernel_kobj);
+ if (!ems_kobj)
+ return -ENOMEM;
+
+ return 0;
+}
+core_initcall(init_sysfs);
#include "sched.h"
#include "tune.h"
#include "walt.h"
+#include "ems/ems.h"
/*
* Targeted preemption latency for CPU-bound tasks:
eenv->max_cpu_count = EAS_CPU_BKP + 1;
/* Find a cpu with sufficient capacity */
- if (sched_feat(EXYNOS_MS)) {
- eenv->cpu[EAS_CPU_NXT].cpu_id = exynos_select_cpu(p,
- &eenv->cpu[EAS_CPU_BKP].cpu_id,
- boosted, prefer_idle);
- if (ontime_of(p)->flags == ONTIME)
- return eenv->cpu[EAS_CPU_NXT].cpu_id;
- }
- else
- eenv->cpu[EAS_CPU_NXT].cpu_id = find_best_target(p,
- &eenv->cpu[EAS_CPU_BKP].cpu_id,
- boosted, prefer_idle);
+ eenv->cpu[EAS_CPU_NXT].cpu_id = find_best_target(p,
+ &eenv->cpu[EAS_CPU_BKP].cpu_id,
+ boosted, prefer_idle);
/* take note if no backup was found */
if (eenv->cpu[EAS_CPU_BKP].cpu_id < 0)
int want_affine = 0;
int want_energy = 0;
int sync = wake_flags & WF_SYNC;
+ int target_cpu;
+
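+ /*
+ * With the EXYNOS_MS feature enabled, let EMS choose the wakeup cpu.
+ * A negative return value means EMS made no selection and the default
+ * path below is used instead.
+ */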
+ if (sched_feat(EXYNOS_MS)) {
+ target_cpu = exynos_wakeup_balance(p, sd_flag, sync);
+ if (target_cpu >= 0)
+ return target_cpu;
+ }
rcu_read_lock();