sched: change exynos scheduler name from EHMP to EMS
author Park Bumgyu <bumgyu.park@samsung.com>
Thu, 22 Mar 2018 05:43:33 +0000 (14:43 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:53 +0000 (20:24 +0300)
Rename the Exynos scheduler. The existing EHMP (Exynos HMP) scheduler was
designed only for HMP chipsets. EMS (Exynos Mobile Scheduler) will support
all chipsets regardless of cluster and core configuration.

Change-Id: I2802ddcd9e401a0d92f9c98656b5e591d429d6ce
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
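
For downstream code that consumed the old names, the rename touches three
spellings, all visible in the hunks below: the header path (<linux/ehmp.h>
becomes <linux/ems.h>), the Kconfig symbol (CONFIG_SCHED_EHMP becomes
CONFIG_SCHED_EMS) and the scheduler feature (EXYNOS_HMP becomes EXYNOS_MS).
A minimal sketch of a hypothetical out-of-tree caller following the rename
(the function below is illustrative and not part of this patch):

    /* Hypothetical caller, shown only to summarize the renames. */
    #include <linux/ems.h>          /* was: #include <linux/ehmp.h> */

    static unsigned long example_read_global_boost(void)
    {
    #ifdef CONFIG_SCHED_EMS         /* was: CONFIG_SCHED_EHMP */
            /* global_boost() keeps its prototype; only the header moved. */
            return global_boost();
    #else
            return 0;
    #endif
    }

Note that include/linux/ems.h already provides empty stubs for the
!CONFIG_SCHED_EMS case, so the #ifdef above is only there to show the
renamed symbol.
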
15 files changed:
drivers/cpufreq/Kconfig
include/linux/ehmp.h [deleted file]
include/linux/ems.h [new file with mode: 0644]
include/linux/sched.h
include/trace/events/ehmp.h [deleted file]
include/trace/events/ems.h [new file with mode: 0644]
init/Kconfig
kernel/sched/Makefile
kernel/sched/ems/Makefile
kernel/sched/ems/ehmp.c
kernel/sched/ems/ems.h [new file with mode: 0644]
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/sched.h
kernel/sched/tune.c

diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 388b9c4cb673742d9b93878b41b7b81dcdd9c361..d383407a67486bab3e63e858238e00997955edda 100644 (file)
@@ -197,7 +197,6 @@ config CPU_FREQ_GOV_SCHEDUTIL
        depends on CPU_FREQ && SMP
        select CPU_FREQ_GOV_ATTR_SET
        select IRQ_WORK
-       select FREQVAR_TUNE
        help
          This governor makes decisions based on the utilization data provided
          by the scheduler.  It sets the CPU frequency to be proportional to
@@ -212,7 +211,7 @@ config CPU_FREQ_GOV_SCHEDUTIL
 
 config FREQVAR_TUNE
        bool "CPU frequency variant tuner"
-       depends on CPU_FREQ_GOV_SCHEDUTIL
+       depends on SCHED_EMS && CPU_FREQ_GOV_SCHEDUTIL
        help
          This option provides the controller which tunes system performance
          as frequency variant.
diff --git a/include/linux/ehmp.h b/include/linux/ehmp.h
deleted file mode 100644 (file)
index 5633347..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/plist.h>
-#include <linux/sched/idle.h>
-
-#ifdef CONFIG_SCHED_TUNE
-enum stune_group {
-       STUNE_ROOT,
-       STUNE_FOREGROUND,
-       STUNE_BACKGROUND,
-       STUNE_TOPAPP,
-       STUNE_GROUP_COUNT,
-};
-#endif
-
-struct gb_qos_request {
-       struct plist_node node;
-       char *name;
-       bool active;
-};
-
-#ifdef CONFIG_SCHED_EHMP
-extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
-                               int state, int cpus);
-extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
-               struct task_struct *p);
-extern void exynos_init_entity_util_avg(struct sched_entity *se);
-extern int exynos_need_active_balance(enum cpu_idle_type idle,
-               struct sched_domain *sd, int src_cpu, int dst_cpu);
-
-extern unsigned long global_boost(void);
-extern int find_second_max_cap(void);
-
-extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
-                               bool boosted, bool prefer_idle);
-
-extern void ontime_migration(void);
-extern int ontime_can_migration(struct task_struct *p, int cpu);
-extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
-                                               struct sched_avg *sa);
-extern void ontime_new_entity_load(struct task_struct *parent,
-                                       struct sched_entity *se);
-extern void ontime_trace_task_info(struct task_struct *p);
-extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val);
-
-extern bool lbt_overutilized(int cpu, int level);
-extern void update_lbt_overutil(int cpu, unsigned long capacity);
-
-extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
-
-extern void request_kernel_prefer_perf(int grp_idx, int enable);
-#else
-static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
-                               int state, int cpus) { return 0; }
-static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
-               struct task_struct *p) { return NULL; }
-static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
-static inline int exynos_need_active_balance(enum cpu_idle_type idle,
-               struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
-
-static inline unsigned long global_boost(void) { return 0; }
-static inline int find_second_max_cap(void) { return -EINVAL; }
-
-static inline int exynos_select_cpu(struct task_struct *p, int prev_cpu,
-                                       int sync, int sd_flag) { return -EINVAL; }
-
-static inline void ontime_migration(void) { }
-static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; }
-static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
-                                                       struct sched_avg *sa) { }
-static inline void ontime_new_entity_load(struct task_struct *p,
-                                       struct sched_entity *se) { }
-static inline void ontime_trace_task_info(struct task_struct *p) { }
-
-static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { }
-
-static inline bool lbt_overutilized(int cpu, int level) { return false; }
-static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }
-
-static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }
-
-//extern void request_kernel_prefer_perf(int grp_idx, int enable) { }
-#endif /* CONFIG_SCHED_EHMP */
diff --git a/include/linux/ems.h b/include/linux/ems.h
new file mode 100644 (file)
index 0000000..d019bbe
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/plist.h>
+#include <linux/sched/idle.h>
+
+#ifdef CONFIG_SCHED_TUNE
+enum stune_group {
+       STUNE_ROOT,
+       STUNE_FOREGROUND,
+       STUNE_BACKGROUND,
+       STUNE_TOPAPP,
+       STUNE_GROUP_COUNT,
+};
+#endif
+
+struct gb_qos_request {
+       struct plist_node node;
+       char *name;
+       bool active;
+};
+
+#ifdef CONFIG_SCHED_EMS
+extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+                               int state, int cpus);
+extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+               struct task_struct *p);
+extern void exynos_init_entity_util_avg(struct sched_entity *se);
+extern int exynos_need_active_balance(enum cpu_idle_type idle,
+               struct sched_domain *sd, int src_cpu, int dst_cpu);
+
+extern unsigned long global_boost(void);
+extern int find_second_max_cap(void);
+
+extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
+                               bool boosted, bool prefer_idle);
+
+extern void ontime_migration(void);
+extern int ontime_can_migration(struct task_struct *p, int cpu);
+extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
+                                               struct sched_avg *sa);
+extern void ontime_new_entity_load(struct task_struct *parent,
+                                       struct sched_entity *se);
+extern void ontime_trace_task_info(struct task_struct *p);
+extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val);
+
+extern bool lbt_overutilized(int cpu, int level);
+extern void update_lbt_overutil(int cpu, unsigned long capacity);
+
+extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
+
+extern void request_kernel_prefer_perf(int grp_idx, int enable);
+#else
+static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+                               int state, int cpus) { return 0; }
+static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+               struct task_struct *p) { return NULL; }
+static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
+static inline int exynos_need_active_balance(enum cpu_idle_type idle,
+               struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
+
+static inline unsigned long global_boost(void) { return 0; }
+static inline int find_second_max_cap(void) { return -EINVAL; }
+
+static inline int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
+                               bool boosted, bool prefer_idle) { return -EINVAL; }
+
+static inline void ontime_migration(void) { }
+static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; }
+static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
+                                                       struct sched_avg *sa) { }
+static inline void ontime_new_entity_load(struct task_struct *p,
+                                       struct sched_entity *se) { }
+static inline void ontime_trace_task_info(struct task_struct *p) { }
+
+static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { }
+
+static inline bool lbt_overutilized(int cpu, int level) { return false; }
+static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }
+
+static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }
+
+static inline void request_kernel_prefer_perf(int grp_idx, int enable) { }
+#endif /* CONFIG_SCHED_EMS */
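
Besides the guard rename from CONFIG_SCHED_EHMP to CONFIG_SCHED_EMS, the new
header also fixes the fallback stubs for exynos_select_cpu() and
request_kernel_prefer_perf() so they match the real declarations. As a hedged
sketch of how the gb_qos_request interface declared here might be used (the
client code below is hypothetical and not part of this patch):

    #include <linux/ems.h>

    /*
     * Hypothetical client of the gb_qos interface declared above.  The
     * request carries a plist_node, so it is assumed it must stay alive
     * while the request is registered; a static object is the simplest
     * way to guarantee that.
     */
    static struct gb_qos_request example_req = {
            .name = "example_client",
    };

    static void example_boost(bool enable)
    {
            /*
             * The meaning of the value is defined by the EMS side; 100
             * and 0 are illustrative.  With CONFIG_SCHED_EMS=n this
             * compiles down to the empty inline stub above.
             */
            gb_qos_update_request(&example_req, enable ? 100 : 0);
    }
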
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e4757aa1704f1a2f6540cef30f667bf3e517ab1..f5e0cc02c845f6a78eb030ee97bc2016f1e94da4 100644 (file)
@@ -417,7 +417,6 @@ struct sched_avg {
        struct util_est                 util_est;
 };
 
-#ifdef CONFIG_SCHED_EHMP
 #define NOT_ONTIME             1
 #define ONTIME_MIGRATING       2
 #define ONTIME                 4
@@ -434,7 +433,6 @@ struct ontime_entity {
        int flags;
        int cpu;
 };
-#endif
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -506,9 +504,7 @@ struct sched_entity {
         */
        struct sched_avg                avg ____cacheline_aligned_in_smp;
 #endif
-#ifdef CONFIG_SCHED_EHMP
        struct ontime_entity            ontime;
-#endif
 };
 
 #ifdef CONFIG_SCHED_WALT
diff --git a/include/trace/events/ehmp.h b/include/trace/events/ehmp.h
deleted file mode 100644 (file)
index 6c9878a..0000000
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- *  Copyright (C) 2017 Park Bumgyu <bumgyu.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ehmp
-
-#if !defined(_TRACE_EHMP_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EHMP_H
-
-#include <linux/sched.h>
-#include <linux/tracepoint.h>
-
-/*
- * Tracepoint for selection of boost cpu
- */
-TRACE_EVENT(ehmp_select_boost_cpu,
-
-       TP_PROTO(struct task_struct *p, int cpu, int trigger, char *state),
-
-       TP_ARGS(p, cpu, trigger, state),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN   )
-               __field(        pid_t,          pid                     )
-               __field(        int,            cpu                     )
-               __field(        int,            trigger                 )
-               __array(        char,           state,          64      )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->cpu            = cpu;
-               __entry->trigger        = trigger;
-               memcpy(__entry->state, state, 64);
-       ),
-
-       TP_printk("comm=%s pid=%d target_cpu=%d trigger=%d state=%s",
-                 __entry->comm, __entry->pid, __entry->cpu,
-                 __entry->trigger, __entry->state)
-);
-
-/*
- * Tracepoint for selection of group balancer
- */
-TRACE_EVENT(ehmp_select_group_boost,
-
-       TP_PROTO(struct task_struct *p, int cpu, char *state),
-
-       TP_ARGS(p, cpu, state),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN   )
-               __field(        pid_t,          pid                     )
-               __field(        int,            cpu                     )
-               __array(        char,           state,          64      )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->cpu            = cpu;
-               memcpy(__entry->state, state, 64);
-       ),
-
-       TP_printk("comm=%s pid=%d target_cpu=%d state=%s",
-                 __entry->comm, __entry->pid, __entry->cpu, __entry->state)
-);
-
-TRACE_EVENT(ehmp_global_boost,
-
-       TP_PROTO(char *name, unsigned long boost),
-
-       TP_ARGS(name, boost),
-
-       TP_STRUCT__entry(
-               __array(        char,           name,           64      )
-               __field(        unsigned long,  boost                   )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->name, name, 64);
-               __entry->boost          = boost;
-       ),
-
-       TP_printk("name=%s global_boost_value=%ld", __entry->name, __entry->boost)
-);
-
-/*
- * Tracepoint for prefer idle
- */
-TRACE_EVENT(ehmp_prefer_idle,
-
-       TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu,
-               unsigned long task_util, unsigned long new_util, int idle),
-
-       TP_ARGS(p, orig_cpu, target_cpu, task_util, new_util, idle),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN   )
-               __field(        pid_t,          pid                     )
-               __field(        int,            orig_cpu                )
-               __field(        int,            target_cpu              )
-               __field(        unsigned long,  task_util               )
-               __field(        unsigned long,  new_util                )
-               __field(        int,            idle                    )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->orig_cpu       = orig_cpu;
-               __entry->target_cpu     = target_cpu;
-               __entry->task_util      = task_util;
-               __entry->new_util       = new_util;
-               __entry->idle           = idle;
-       ),
-
-       TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d task_util=%lu new_util=%lu idle=%d",
-               __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu,
-               __entry->task_util, __entry->new_util, __entry->idle)
-);
-
-TRACE_EVENT(ehmp_prefer_idle_cpu_select,
-
-       TP_PROTO(struct task_struct *p, int cpu),
-
-       TP_ARGS(p, cpu),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN   )
-               __field(        pid_t,          pid                     )
-               __field(        int,            cpu                     )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->cpu            = cpu;
-       ),
-
-       TP_printk("comm=%s pid=%d target_cpu=%d",
-                 __entry->comm, __entry->pid, __entry->cpu)
-);
-
-/*
- * Tracepoint for cpu selection
- */
-TRACE_EVENT(ehmp_find_best_target_stat,
-
-       TP_PROTO(int cpu, unsigned long cap, unsigned long util, unsigned long target_util),
-
-       TP_ARGS(cpu, cap, util, target_util),
-
-       TP_STRUCT__entry(
-               __field( int,           cpu     )
-               __field( unsigned long, cap     )
-               __field( unsigned long, util    )
-               __field( unsigned long, target_util     )
-       ),
-
-       TP_fast_assign(
-               __entry->cpu = cpu;
-               __entry->cap = cap;
-               __entry->util = util;
-               __entry->target_util = target_util;
-       ),
-
-       TP_printk("find_best : [cpu%d] capacity %lu, util %lu, target_util %lu\n",
-               __entry->cpu, __entry->cap, __entry->util, __entry->target_util)
-);
-
-TRACE_EVENT(ehmp_find_best_target_candi,
-
-       TP_PROTO(unsigned int cpu),
-
-       TP_ARGS(cpu),
-
-       TP_STRUCT__entry(
-               __field( unsigned int, cpu      )
-       ),
-
-       TP_fast_assign(
-               __entry->cpu = cpu;
-       ),
-
-       TP_printk("find_best: energy candidate cpu %d\n", __entry->cpu)
-);
-
-TRACE_EVENT(ehmp_find_best_target_cpu,
-
-       TP_PROTO(unsigned int cpu, unsigned long target_util),
-
-       TP_ARGS(cpu, target_util),
-
-       TP_STRUCT__entry(
-               __field( unsigned int, cpu      )
-               __field( unsigned long, target_util     )
-       ),
-
-       TP_fast_assign(
-               __entry->cpu = cpu;
-               __entry->target_util = target_util;
-       ),
-
-       TP_printk("find_best: target_cpu %d, target_util %lu\n", __entry->cpu, __entry->target_util)
-);
-
-/*
- * Tracepoint for ontime migration
- */
-TRACE_EVENT(ehmp_ontime_migration,
-
-       TP_PROTO(struct task_struct *p, unsigned long load,
-               int src_cpu, int dst_cpu, int boost_migration),
-
-       TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN   )
-               __field(        pid_t,          pid                     )
-               __field(        unsigned long,  load                    )
-               __field(        int,            src_cpu                 )
-               __field(        int,            dst_cpu                 )
-               __field(        int,            bm                      )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->load           = load;
-               __entry->src_cpu        = src_cpu;
-               __entry->dst_cpu        = dst_cpu;
-               __entry->bm             = boost_migration;
-       ),
-
-       TP_printk("comm=%s pid=%d ontime_load_avg=%lu src_cpu=%d dst_cpu=%d boost_migration=%d",
-               __entry->comm, __entry->pid, __entry->load,
-               __entry->src_cpu, __entry->dst_cpu, __entry->bm)
-);
-
-/*
- * Tracepoint for accounting ontime load averages for tasks.
- */
-TRACE_EVENT(ehmp_ontime_new_entity_load,
-
-       TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),
-
-       TP_ARGS(tsk, avg),
-
-       TP_STRUCT__entry(
-               __array( char,          comm,   TASK_COMM_LEN           )
-               __field( pid_t,         pid                             )
-               __field( int,           cpu                             )
-               __field( unsigned long, load_avg                        )
-               __field( u64,           load_sum                        )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-               __entry->pid                    = tsk->pid;
-               __entry->cpu                    = task_cpu(tsk);
-               __entry->load_avg               = avg->load_avg;
-               __entry->load_sum               = avg->load_sum;
-       ),
-       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu",
-                 __entry->comm,
-                 __entry->pid,
-                 __entry->cpu,
-                 __entry->load_avg,
-                 (u64)__entry->load_sum)
-);
-
-/*
- * Tracepoint for accounting ontime load averages for tasks.
- */
-TRACE_EVENT(ehmp_ontime_load_avg_task,
-
-       TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),
-
-       TP_ARGS(tsk, avg, ontime_flag),
-
-       TP_STRUCT__entry(
-               __array( char,          comm,   TASK_COMM_LEN           )
-               __field( pid_t,         pid                             )
-               __field( int,           cpu                             )
-               __field( unsigned long, load_avg                        )
-               __field( u64,           load_sum                        )
-               __field( int,           ontime_flag                     )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-               __entry->pid                    = tsk->pid;
-               __entry->cpu                    = task_cpu(tsk);
-               __entry->load_avg               = avg->load_avg;
-               __entry->load_sum               = avg->load_sum;
-               __entry->ontime_flag            = ontime_flag;
-       ),
-       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu ontime_flag=%d",
-                 __entry->comm, __entry->pid, __entry->cpu, __entry->load_avg,
-                 (u64)__entry->load_sum, __entry->ontime_flag)
-);
-
-TRACE_EVENT(ehmp_ontime_check_migrate,
-
-       TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),
-
-       TP_ARGS(tsk, cpu, migrate, label),
-
-       TP_STRUCT__entry(
-               __array( char,          comm,   TASK_COMM_LEN   )
-               __field( pid_t,         pid                     )
-               __field( int,           cpu                     )
-               __field( int,           migrate                 )
-               __array( char,          label,  64              )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-               __entry->pid                    = tsk->pid;
-               __entry->cpu                    = cpu;
-               __entry->migrate                = migrate;
-               strncpy(__entry->label, label, 64);
-       ),
-
-       TP_printk("comm=%s pid=%d target_cpu=%d migrate=%d reason=%s",
-               __entry->comm, __entry->pid, __entry->cpu,
-               __entry->migrate, __entry->label)
-);
-
-TRACE_EVENT(ehmp_ontime_task_wakeup,
-
-       TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
-
-       TP_ARGS(tsk, src_cpu, dst_cpu, label),
-
-       TP_STRUCT__entry(
-               __array( char,          comm,   TASK_COMM_LEN   )
-               __field( pid_t,         pid                     )
-               __field( int,           src_cpu                 )
-               __field( int,           dst_cpu                 )
-               __array( char,          label,  64              )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-               __entry->pid                    = tsk->pid;
-               __entry->src_cpu                = src_cpu;
-               __entry->dst_cpu                = dst_cpu;
-               strncpy(__entry->label, label, 64);
-       ),
-
-       TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s",
-               __entry->comm, __entry->pid, __entry->src_cpu,
-               __entry->dst_cpu, __entry->label)
-);
-
-TRACE_EVENT(ehmp_lbt_overutilized,
-
-       TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
-
-       TP_ARGS(cpu, level, util, capacity, overutilized),
-
-       TP_STRUCT__entry(
-               __field( int,           cpu                     )
-               __field( int,           level                   )
-               __field( unsigned long, util                    )
-               __field( unsigned long, capacity                )
-               __field( bool,          overutilized            )
-       ),
-
-       TP_fast_assign(
-               __entry->cpu                    = cpu;
-               __entry->level                  = level;
-               __entry->util                   = util;
-               __entry->capacity               = capacity;
-               __entry->overutilized           = overutilized;
-       ),
-
-       TP_printk("cpu=%d level=%d util=%lu capacity=%lu overutilized=%d",
-               __entry->cpu, __entry->level, __entry->util,
-               __entry->capacity, __entry->overutilized)
-);
-
-#endif /* _TRACE_EHMP_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h
new file mode 100644 (file)
index 0000000..49598ef
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ *  Copyright (C) 2017 Park Bumgyu <bumgyu.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ems
+
+#if !defined(_TRACE_EMS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EMS_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for selection of boost cpu
+ */
+TRACE_EVENT(ehmp_select_boost_cpu,
+
+       TP_PROTO(struct task_struct *p, int cpu, int trigger, char *state),
+
+       TP_ARGS(p, cpu, trigger, state),
+
+       TP_STRUCT__entry(
+               __array(        char,           comm,   TASK_COMM_LEN   )
+               __field(        pid_t,          pid                     )
+               __field(        int,            cpu                     )
+               __field(        int,            trigger                 )
+               __array(        char,           state,          64      )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->cpu            = cpu;
+               __entry->trigger        = trigger;
+               memcpy(__entry->state, state, 64);
+       ),
+
+       TP_printk("comm=%s pid=%d target_cpu=%d trigger=%d state=%s",
+                 __entry->comm, __entry->pid, __entry->cpu,
+                 __entry->trigger, __entry->state)
+);
+
+/*
+ * Tracepoint for selection of group balancer
+ */
+TRACE_EVENT(ehmp_select_group_boost,
+
+       TP_PROTO(struct task_struct *p, int cpu, char *state),
+
+       TP_ARGS(p, cpu, state),
+
+       TP_STRUCT__entry(
+               __array(        char,           comm,   TASK_COMM_LEN   )
+               __field(        pid_t,          pid                     )
+               __field(        int,            cpu                     )
+               __array(        char,           state,          64      )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->cpu            = cpu;
+               memcpy(__entry->state, state, 64);
+       ),
+
+       TP_printk("comm=%s pid=%d target_cpu=%d state=%s",
+                 __entry->comm, __entry->pid, __entry->cpu, __entry->state)
+);
+
+TRACE_EVENT(ehmp_global_boost,
+
+       TP_PROTO(char *name, unsigned long boost),
+
+       TP_ARGS(name, boost),
+
+       TP_STRUCT__entry(
+               __array(        char,           name,           64      )
+               __field(        unsigned long,  boost                   )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->name, name, 64);
+               __entry->boost          = boost;
+       ),
+
+       TP_printk("name=%s global_boost_value=%ld", __entry->name, __entry->boost)
+);
+
+/*
+ * Tracepoint for prefer idle
+ */
+TRACE_EVENT(ehmp_prefer_idle,
+
+       TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu,
+               unsigned long task_util, unsigned long new_util, int idle),
+
+       TP_ARGS(p, orig_cpu, target_cpu, task_util, new_util, idle),
+
+       TP_STRUCT__entry(
+               __array(        char,           comm,   TASK_COMM_LEN   )
+               __field(        pid_t,          pid                     )
+               __field(        int,            orig_cpu                )
+               __field(        int,            target_cpu              )
+               __field(        unsigned long,  task_util               )
+               __field(        unsigned long,  new_util                )
+               __field(        int,            idle                    )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->orig_cpu       = orig_cpu;
+               __entry->target_cpu     = target_cpu;
+               __entry->task_util      = task_util;
+               __entry->new_util       = new_util;
+               __entry->idle           = idle;
+       ),
+
+       TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d task_util=%lu new_util=%lu idle=%d",
+               __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu,
+               __entry->task_util, __entry->new_util, __entry->idle)
+);
+
+TRACE_EVENT(ehmp_prefer_idle_cpu_select,
+
+       TP_PROTO(struct task_struct *p, int cpu),
+
+       TP_ARGS(p, cpu),
+
+       TP_STRUCT__entry(
+               __array(        char,           comm,   TASK_COMM_LEN   )
+               __field(        pid_t,          pid                     )
+               __field(        int,            cpu                     )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->cpu            = cpu;
+       ),
+
+       TP_printk("comm=%s pid=%d target_cpu=%d",
+                 __entry->comm, __entry->pid, __entry->cpu)
+);
+
+/*
+ * Tracepoint for cpu selection
+ */
+TRACE_EVENT(ehmp_find_best_target_stat,
+
+       TP_PROTO(int cpu, unsigned long cap, unsigned long util, unsigned long target_util),
+
+       TP_ARGS(cpu, cap, util, target_util),
+
+       TP_STRUCT__entry(
+               __field( int,           cpu     )
+               __field( unsigned long, cap     )
+               __field( unsigned long, util    )
+               __field( unsigned long, target_util     )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu = cpu;
+               __entry->cap = cap;
+               __entry->util = util;
+               __entry->target_util = target_util;
+       ),
+
+       TP_printk("find_best : [cpu%d] capacity %lu, util %lu, target_util %lu\n",
+               __entry->cpu, __entry->cap, __entry->util, __entry->target_util)
+);
+
+TRACE_EVENT(ehmp_find_best_target_candi,
+
+       TP_PROTO(unsigned int cpu),
+
+       TP_ARGS(cpu),
+
+       TP_STRUCT__entry(
+               __field( unsigned int, cpu      )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu = cpu;
+       ),
+
+       TP_printk("find_best: energy candidate cpu %d\n", __entry->cpu)
+);
+
+TRACE_EVENT(ehmp_find_best_target_cpu,
+
+       TP_PROTO(unsigned int cpu, unsigned long target_util),
+
+       TP_ARGS(cpu, target_util),
+
+       TP_STRUCT__entry(
+               __field( unsigned int, cpu      )
+               __field( unsigned long, target_util     )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu = cpu;
+               __entry->target_util = target_util;
+       ),
+
+       TP_printk("find_best: target_cpu %d, target_util %lu\n", __entry->cpu, __entry->target_util)
+);
+
+/*
+ * Tracepoint for ontime migration
+ */
+TRACE_EVENT(ehmp_ontime_migration,
+
+       TP_PROTO(struct task_struct *p, unsigned long load,
+               int src_cpu, int dst_cpu, int boost_migration),
+
+       TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration),
+
+       TP_STRUCT__entry(
+               __array(        char,           comm,   TASK_COMM_LEN   )
+               __field(        pid_t,          pid                     )
+               __field(        unsigned long,  load                    )
+               __field(        int,            src_cpu                 )
+               __field(        int,            dst_cpu                 )
+               __field(        int,            bm                      )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->load           = load;
+               __entry->src_cpu        = src_cpu;
+               __entry->dst_cpu        = dst_cpu;
+               __entry->bm             = boost_migration;
+       ),
+
+       TP_printk("comm=%s pid=%d ontime_load_avg=%lu src_cpu=%d dst_cpu=%d boost_migration=%d",
+               __entry->comm, __entry->pid, __entry->load,
+               __entry->src_cpu, __entry->dst_cpu, __entry->bm)
+);
+
+/*
+ * Tracepoint for accounting ontime load averages for tasks.
+ */
+TRACE_EVENT(ehmp_ontime_new_entity_load,
+
+       TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),
+
+       TP_ARGS(tsk, avg),
+
+       TP_STRUCT__entry(
+               __array( char,          comm,   TASK_COMM_LEN           )
+               __field( pid_t,         pid                             )
+               __field( int,           cpu                             )
+               __field( unsigned long, load_avg                        )
+               __field( u64,           load_sum                        )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->load_avg               = avg->load_avg;
+               __entry->load_sum               = avg->load_sum;
+       ),
+       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu",
+                 __entry->comm,
+                 __entry->pid,
+                 __entry->cpu,
+                 __entry->load_avg,
+                 (u64)__entry->load_sum)
+);
+
+/*
+ * Tracepoint for accounting ontime load averages for tasks.
+ */
+TRACE_EVENT(ehmp_ontime_load_avg_task,
+
+       TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),
+
+       TP_ARGS(tsk, avg, ontime_flag),
+
+       TP_STRUCT__entry(
+               __array( char,          comm,   TASK_COMM_LEN           )
+               __field( pid_t,         pid                             )
+               __field( int,           cpu                             )
+               __field( unsigned long, load_avg                        )
+               __field( u64,           load_sum                        )
+               __field( int,           ontime_flag                     )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->load_avg               = avg->load_avg;
+               __entry->load_sum               = avg->load_sum;
+               __entry->ontime_flag            = ontime_flag;
+       ),
+       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu ontime_flag=%d",
+                 __entry->comm, __entry->pid, __entry->cpu, __entry->load_avg,
+                 (u64)__entry->load_sum, __entry->ontime_flag)
+);
+
+TRACE_EVENT(ehmp_ontime_check_migrate,
+
+       TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),
+
+       TP_ARGS(tsk, cpu, migrate, label),
+
+       TP_STRUCT__entry(
+               __array( char,          comm,   TASK_COMM_LEN   )
+               __field( pid_t,         pid                     )
+               __field( int,           cpu                     )
+               __field( int,           migrate                 )
+               __array( char,          label,  64              )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = cpu;
+               __entry->migrate                = migrate;
+               strncpy(__entry->label, label, 64);
+       ),
+
+       TP_printk("comm=%s pid=%d target_cpu=%d migrate=%d reason=%s",
+               __entry->comm, __entry->pid, __entry->cpu,
+               __entry->migrate, __entry->label)
+);
+
+TRACE_EVENT(ehmp_ontime_task_wakeup,
+
+       TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
+
+       TP_ARGS(tsk, src_cpu, dst_cpu, label),
+
+       TP_STRUCT__entry(
+               __array( char,          comm,   TASK_COMM_LEN   )
+               __field( pid_t,         pid                     )
+               __field( int,           src_cpu                 )
+               __field( int,           dst_cpu                 )
+               __array( char,          label,  64              )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->src_cpu                = src_cpu;
+               __entry->dst_cpu                = dst_cpu;
+               strncpy(__entry->label, label, 64);
+       ),
+
+       TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s",
+               __entry->comm, __entry->pid, __entry->src_cpu,
+               __entry->dst_cpu, __entry->label)
+);
+
+TRACE_EVENT(ehmp_lbt_overutilized,
+
+       TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
+
+       TP_ARGS(cpu, level, util, capacity, overutilized),
+
+       TP_STRUCT__entry(
+               __field( int,           cpu                     )
+               __field( int,           level                   )
+               __field( unsigned long, util                    )
+               __field( unsigned long, capacity                )
+               __field( bool,          overutilized            )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu                    = cpu;
+               __entry->level                  = level;
+               __entry->util                   = util;
+               __entry->capacity               = capacity;
+               __entry->overutilized           = overutilized;
+       ),
+
+       TP_printk("cpu=%d level=%d util=%lu capacity=%lu overutilized=%d",
+               __entry->cpu, __entry->level, __entry->util,
+               __entry->capacity, __entry->overutilized)
+);
+
+#endif /* _TRACE_EMS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
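
Only TRACE_SYSTEM, the header guard and the file name change in this header;
the individual event names keep their ehmp_ prefix. A hedged sketch of how one
of these events is emitted (the caller below is hypothetical; in this patch
kernel/sched/ems/ehmp.c is the file that defines CREATE_TRACE_POINTS before
including the header, as shown further down):

    #include <trace/events/ems.h>

    static void example_report_boost(unsigned long boost)
    {
            /*
             * ehmp_global_boost copies a fixed 64 bytes of the name
             * string, so callers are expected to hand in a buffer at
             * least that large.
             */
            static char name[64] = "example";

            trace_ehmp_global_boost(name, boost);
    }
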
diff --git a/init/Kconfig b/init/Kconfig
index f1fa25160c2200a9e2a19e3ebbf77e6c23eb064e..da82b6662d97d9553b75a726c122257364781dfd 100644 (file)
@@ -1066,15 +1066,15 @@ config SCHED_TUNE
 
          If unsure, say N.
 
-config SCHED_EHMP
-       bool "Exynos scheduler for Heterogeneous Multi-Processor"
+config SCHED_EMS
+       bool "Exynos Mobile Scheduler"
        depends on SMP
        help
-         This option supports Exynos scheduler for HMP architecture. It is
-         designed to secure the limits of energy aware scheduler. This option
-         provides features such as independent boosting functinos such as
-         global boost and on-time migration, and prefer_perf and enhanced
-         prefer_idle that work in conjunction with SCHEDTUNE.
+         This option supports the Exynos Mobile Scheduler. It is designed to
+         secure the limits of the energy aware scheduler. This option provides
+         independent boosting functions such as on-time migration, as well as
+         prefer_perf and enhanced prefer_idle, which work in conjunction with
+         SCHEDTUNE.
 
          If unsure, say N.
 
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 154671b2cc5a0585477e76f665fa34703895dd33..65236522a4d21c023f0652414a6e43d127350632 100644 (file)
@@ -31,4 +31,4 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
 obj-$(CONFIG_PSI) += psi.o
-obj-$(CONFIG_SCHED_EHMP) += ems/
+obj-$(CONFIG_SCHED_EMS) += ems/
diff --git a/kernel/sched/ems/Makefile b/kernel/sched/ems/Makefile
index f2b1f7be7905f6dccdf14ff0ce7e7384c39afda8..5a981b8e53c494c3c2cb35ed77dcd326f84d755d 100644 (file)
@@ -1,2 +1,2 @@
-obj-$(CONFIG_SCHED_EHMP) += ehmp.o
+obj-$(CONFIG_SCHED_EMS) += ehmp.o
 obj-$(CONFIG_FREQVAR_TUNE) += freqvar_tune.o
diff --git a/kernel/sched/ems/ehmp.c b/kernel/sched/ems/ehmp.c
index 02c835d8137993312bf5eefc82491d405b2aaabc..f5a936c26168be45c373be5fb4ec3727173b00cc 100644 (file)
@@ -8,11 +8,11 @@
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
 #include <linux/pm_qos.h>
-#include <linux/ehmp.h>
+#include <linux/ems.h>
 #include <linux/sched_energy.h>
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/ehmp.h>
+#include <trace/events/ems.h>
 
 #include "../sched.h"
 #include "../tune.h"
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
new file mode 100644 (file)
index 0000000..1ad0eb0
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8b06736f44f10c38b5f3d90e3c720bf2349c318f..f51f49578f71a30ff8a343d4c39f65300a815426 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/mempolicy.h>
 #include <linux/migrate.h>
 #include <linux/task_work.h>
-#include <linux/ehmp.h>
+#include <linux/ems.h>
 
 #include <trace/events/sched.h>
 
@@ -797,7 +797,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
        long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
        long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
 
-       if (sched_feat(EXYNOS_HMP)) {
+       if (sched_feat(EXYNOS_MS)) {
                exynos_init_entity_util_avg(se);
                goto util_init_done;
        }
@@ -5197,7 +5197,7 @@ static inline void update_overutilized_status(struct rq *rq)
        rcu_read_lock();
        sd = rcu_dereference(rq->sd);
        if (sd && !sd_overutilized(sd)) {
-               if (sched_feat(EXYNOS_HMP))
+               if (sched_feat(EXYNOS_MS))
                        overutilized = lbt_overutilized(rq->cpu, sd->level);
                else
                        overutilized = cpu_overutilized(rq->cpu);
@@ -6130,7 +6130,7 @@ static int group_idle_state(struct energy_env *eenv, int cpu_idx)
         * after moving, previous cpu/cluster can be powered down,
         * so it should be consider it when idle power was calculated.
         */
-       if (sched_feat(EXYNOS_HMP)) {
+       if (sched_feat(EXYNOS_MS)) {
                new_state = exynos_estimate_idle_state(cpu_idx, sched_group_span(sg),
                                                max_idle_state_idx, sg->group_weight);
                if (new_state)
@@ -6743,7 +6743,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
                                (sd->imbalance_pct-100) / 100;
 
-       if (sched_feat(EXYNOS_HMP)) {
+       if (sched_feat(EXYNOS_MS)) {
                idlest = exynos_fit_idlest_group(sd, p);
                if (idlest)
                        return idlest;
@@ -9525,7 +9525,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        *overload = 1;
                }
 
-
                if (sched_feat(EXYNOS_MS)) {
                        if (lbt_overutilized(i, env->sd->level)) {
                                *overutilized = true;
@@ -10292,7 +10291,7 @@ static int need_active_balance(struct lb_env *env)
                        return 1;
        }
 
-       if (sched_feat(EXYNOS_HMP))
+       if (sched_feat(EXYNOS_MS))
                return exynos_need_active_balance(env->idle, sd, env->src_cpu, env->dst_cpu);
 
        /*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ca512de98d611135f705a41a659b8ece881c4cb6..98432cf8f4b2a8ec7e1a0a7c97d4b698c0c1c4ac 100644 (file)
@@ -119,11 +119,8 @@ SCHED_FEAT(EAS_PREFER_IDLE, true)
 SCHED_FEAT(FIND_BEST_TARGET, true)
 SCHED_FEAT(FBT_STRICT_ORDER, true)
 
-#ifdef CONFIG_SCHED_EHMP
-SCHED_FEAT(EXYNOS_HMP, true)
-#else
-SCHED_FEAT(EXYNOS_HMP, false)
-#endif
+SCHED_FEAT(EXYNOS_MS, true)
+
 /*
  * Apply schedtune boost hold to tasks of all sched classes.
  * If enabled, schedtune will hold the boost applied to a CPU
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d6987ba1cfc69624f5408f7569abcb05d28dd9b4..27d8582df918ab85f505e7f0c33e30360ae85a9c 100644 (file)
@@ -856,7 +856,7 @@ struct rq {
        u64 cum_window_demand;
 #endif /* CONFIG_SCHED_WALT */
 
-#ifdef CONFIG_SCHED_EHMP
+#ifdef CONFIG_SCHED_EMS
        bool ontime_migrating;
 #endif
 
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 534ee933ceb677d302d3861c55aa155b511b18f1..ff250f0a8f7680ebc10d88d88a1be0ee1ec95d65 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/printk.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
-#include <linux/ehmp.h>
+#include <linux/ems.h>
 
 #include <trace/events/sched.h>
 
@@ -497,7 +497,7 @@ int schedtune_prefer_idle(struct task_struct *p)
        return prefer_idle;
 }
 
-#ifdef CONFIG_SCHED_EHMP
+#ifdef CONFIG_SCHED_EMS
 static atomic_t kernel_prefer_perf_req[BOOSTGROUPS_COUNT];
 int kernel_prefer_perf(int grp_idx)
 {