depends on CPU_FREQ && SMP
select CPU_FREQ_GOV_ATTR_SET
select IRQ_WORK
- select FREQVAR_TUNE
help
This governor makes decisions based on the utilization data provided
by the scheduler. It sets the CPU frequency to be proportional to
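For reference, the proportionality this help text describes is what schedutil's get_next_freq() implements upstream. A simplified sketch of that heuristic (the ~25% headroom factor matches upstream; the function itself is illustrative, not the kernel's exact code):

	/*
	 * Illustrative sketch of the schedutil formula:
	 * next_freq = 1.25 * max_freq * util / max, where the 25%
	 * headroom keeps the CPU from running right at its capacity.
	 */
	static unsigned int sugov_next_freq_sketch(unsigned int max_freq,
						   unsigned long util,
						   unsigned long max)
	{
		return (max_freq + (max_freq >> 2)) * util / max;
	}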
config FREQVAR_TUNE
bool "CPU frequency variant tuner"
- depends on CPU_FREQ_GOV_SCHEDUTIL
+ depends on SCHED_EMS && CPU_FREQ_GOV_SCHEDUTIL
help
This option provides a controller that tunes system performance
according to the current CPU frequency.
+++ /dev/null
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/plist.h>
-#include <linux/sched/idle.h>
-
-#ifdef CONFIG_SCHED_TUNE
-enum stune_group {
- STUNE_ROOT,
- STUNE_FOREGROUND,
- STUNE_BACKGROUND,
- STUNE_TOPAPP,
- STUNE_GROUP_COUNT,
-};
-#endif
-
-struct gb_qos_request {
- struct plist_node node;
- char *name;
- bool active;
-};
-
-#ifdef CONFIG_SCHED_EHMP
-extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
- int state, int cpus);
-extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
- struct task_struct *p);
-extern void exynos_init_entity_util_avg(struct sched_entity *se);
-extern int exynos_need_active_balance(enum cpu_idle_type idle,
- struct sched_domain *sd, int src_cpu, int dst_cpu);
-
-extern unsigned long global_boost(void);
-extern int find_second_max_cap(void);
-
-extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
- bool boosted, bool prefer_idle);
-
-extern void ontime_migration(void);
-extern int ontime_can_migration(struct task_struct *p, int cpu);
-extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
- struct sched_avg *sa);
-extern void ontime_new_entity_load(struct task_struct *parent,
- struct sched_entity *se);
-extern void ontime_trace_task_info(struct task_struct *p);
-extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val);
-
-extern bool lbt_overutilized(int cpu, int level);
-extern void update_lbt_overutil(int cpu, unsigned long capacity);
-
-extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
-
-extern void request_kernel_prefer_perf(int grp_idx, int enable);
-#else
-static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
- int state, int cpus) { return 0; }
-static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
- struct task_struct *p) { return NULL; }
-static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
-static inline int exynos_need_active_balance(enum cpu_idle_type idle,
- struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
-
-static inline unsigned long global_boost(void) { return 0; }
-static inline int find_second_max_cap(void) { return -EINVAL; }
-
-static inline int exynos_select_cpu(struct task_struct *p, int prev_cpu,
- int sync, int sd_flag) { return -EINVAL; }
-
-static inline void ontime_migration(void) { }
-static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; }
-static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
- struct sched_avg *sa) { }
-static inline void ontime_new_entity_load(struct task_struct *p,
- struct sched_entity *se) { }
-static inline void ontime_trace_task_info(struct task_struct *p) { }
-
-static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { }
-
-static inline bool lbt_overutilized(int cpu, int level) { return false; }
-static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }
-
-static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }
-
-//extern void request_kernel_prefer_perf(int grp_idx, int enable) { }
-#endif /* CONFIG_SCHED_EHMP */
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/plist.h>
+#include <linux/sched/idle.h>
+
+#ifdef CONFIG_SCHED_TUNE
+enum stune_group {
+ STUNE_ROOT,
+ STUNE_FOREGROUND,
+ STUNE_BACKGROUND,
+ STUNE_TOPAPP,
+ STUNE_GROUP_COUNT,
+};
+#endif
+
+struct gb_qos_request {
+ struct plist_node node;
+ char *name;
+ bool active;
+};
+
+#ifdef CONFIG_SCHED_EMS
+extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+ int state, int cpus);
+extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+ struct task_struct *p);
+extern void exynos_init_entity_util_avg(struct sched_entity *se);
+extern int exynos_need_active_balance(enum cpu_idle_type idle,
+ struct sched_domain *sd, int src_cpu, int dst_cpu);
+
+extern unsigned long global_boost(void);
+extern int find_second_max_cap(void);
+
+extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
+ bool boosted, bool prefer_idle);
+
+extern void ontime_migration(void);
+extern int ontime_can_migration(struct task_struct *p, int cpu);
+extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
+ struct sched_avg *sa);
+extern void ontime_new_entity_load(struct task_struct *parent,
+ struct sched_entity *se);
+extern void ontime_trace_task_info(struct task_struct *p);
+extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val);
+
+extern bool lbt_overutilized(int cpu, int level);
+extern void update_lbt_overutil(int cpu, unsigned long capacity);
+
+extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
+
+extern void request_kernel_prefer_perf(int grp_idx, int enable);
+#else
+static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+ int state, int cpus) { return 0; }
+static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+ struct task_struct *p) { return NULL; }
+static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
+static inline int exynos_need_active_balance(enum cpu_idle_type idle,
+ struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
+
+static inline unsigned long global_boost(void) { return 0; }
+static inline int find_second_max_cap(void) { return -EINVAL; }
+
+static inline int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
+ bool boosted, bool prefer_idle) { return -EINVAL; }
+
+static inline void ontime_migration(void) { }
+static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; }
+static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
+ struct sched_avg *sa) { }
+static inline void ontime_new_entity_load(struct task_struct *p,
+ struct sched_entity *se) { }
+static inline void ontime_trace_task_info(struct task_struct *p) { }
+
+static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { }
+
+static inline bool lbt_overutilized(int cpu, int level) { return false; }
+static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }
+
+static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }
+
+static inline void request_kernel_prefer_perf(int grp_idx, int enable) { }
+#endif /* CONFIG_SCHED_EMS */
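Two notes on the API carried over by the renamed header. The stune_group indices mirror the standard Android schedtune cgroups (root, foreground, background, top-app), which is how request_kernel_prefer_perf() addresses a boost group by index rather than by cgroup pointer. And gb_qos_request is a plist-backed aggregation handle: callers file a named, valued request, and the largest outstanding value wins. The aggregator itself lives in the EMS code, not in this diff; a minimal sketch of how such a plist aggregator typically works (the lock and list names here are hypothetical):

	static DEFINE_SPINLOCK(gb_qos_lock);	/* hypothetical */
	static struct plist_head gb_qos_list = PLIST_HEAD_INIT(gb_qos_list);

	void gb_qos_update_request(struct gb_qos_request *req, u32 new_value)
	{
		unsigned long flags;

		spin_lock_irqsave(&gb_qos_lock, flags);
		/* re-file this request at its new value */
		if (req->active)
			plist_del(&req->node, &gb_qos_list);
		plist_node_init(&req->node, new_value);
		plist_add(&req->node, &gb_qos_list);
		req->active = true;
		spin_unlock_irqrestore(&gb_qos_lock, flags);
		/* the highest-value node now drives global_boost() */
	}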
struct util_est util_est;
};
-#ifdef CONFIG_SCHED_EHMP
#define NOT_ONTIME 1
#define ONTIME_MIGRATING 2
#define ONTIME 4
int flags;
int cpu;
};
-#endif
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
*/
struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
-#ifdef CONFIG_SCHED_EHMP
struct ontime_entity ontime;
-#endif
};
#ifdef CONFIG_SCHED_WALT
+++ /dev/null
-/*
- * Copyright (C) 2017 Park Bumgyu <bumgyu.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ehmp
-
-#if !defined(_TRACE_EHMP_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EHMP_H
-
-#include <linux/sched.h>
-#include <linux/tracepoint.h>
-
-/*
- * Tracepoint for selection of boost cpu
- */
-TRACE_EVENT(ehmp_select_boost_cpu,
-
- TP_PROTO(struct task_struct *p, int cpu, int trigger, char *state),
-
- TP_ARGS(p, cpu, trigger, state),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( int, trigger )
- __array( char, state, 64 )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->cpu = cpu;
- __entry->trigger = trigger;
- memcpy(__entry->state, state, 64);
- ),
-
- TP_printk("comm=%s pid=%d target_cpu=%d trigger=%d state=%s",
- __entry->comm, __entry->pid, __entry->cpu,
- __entry->trigger, __entry->state)
-);
-
-/*
- * Tracepoint for selection of group balancer
- */
-TRACE_EVENT(ehmp_select_group_boost,
-
- TP_PROTO(struct task_struct *p, int cpu, char *state),
-
- TP_ARGS(p, cpu, state),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __array( char, state, 64 )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->cpu = cpu;
- memcpy(__entry->state, state, 64);
- ),
-
- TP_printk("comm=%s pid=%d target_cpu=%d state=%s",
- __entry->comm, __entry->pid, __entry->cpu, __entry->state)
-);
-
-TRACE_EVENT(ehmp_global_boost,
-
- TP_PROTO(char *name, unsigned long boost),
-
- TP_ARGS(name, boost),
-
- TP_STRUCT__entry(
- __array( char, name, 64 )
- __field( unsigned long, boost )
- ),
-
- TP_fast_assign(
- memcpy(__entry->name, name, 64);
- __entry->boost = boost;
- ),
-
- TP_printk("name=%s global_boost_value=%ld", __entry->name, __entry->boost)
-);
-
-/*
- * Tracepoint for prefer idle
- */
-TRACE_EVENT(ehmp_prefer_idle,
-
- TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu,
- unsigned long task_util, unsigned long new_util, int idle),
-
- TP_ARGS(p, orig_cpu, target_cpu, task_util, new_util, idle),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, orig_cpu )
- __field( int, target_cpu )
- __field( unsigned long, task_util )
- __field( unsigned long, new_util )
- __field( int, idle )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->orig_cpu = orig_cpu;
- __entry->target_cpu = target_cpu;
- __entry->task_util = task_util;
- __entry->new_util = new_util;
- __entry->idle = idle;
- ),
-
- TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d task_util=%lu new_util=%lu idle=%d",
- __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu,
- __entry->task_util, __entry->new_util, __entry->idle)
-);
-
-TRACE_EVENT(ehmp_prefer_idle_cpu_select,
-
- TP_PROTO(struct task_struct *p, int cpu),
-
- TP_ARGS(p, cpu),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->cpu = cpu;
- ),
-
- TP_printk("comm=%s pid=%d target_cpu=%d",
- __entry->comm, __entry->pid, __entry->cpu)
-);
-
-/*
- * Tracepoint for cpu selection
- */
-TRACE_EVENT(ehmp_find_best_target_stat,
-
- TP_PROTO(int cpu, unsigned long cap, unsigned long util, unsigned long target_util),
-
- TP_ARGS(cpu, cap, util, target_util),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned long, cap )
- __field( unsigned long, util )
- __field( unsigned long, target_util )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->cap = cap;
- __entry->util = util;
- __entry->target_util = target_util;
- ),
-
- TP_printk("find_best : [cpu%d] capacity %lu, util %lu, target_util %lu\n",
- __entry->cpu, __entry->cap, __entry->util, __entry->target_util)
-);
-
-TRACE_EVENT(ehmp_find_best_target_candi,
-
- TP_PROTO(unsigned int cpu),
-
- TP_ARGS(cpu),
-
- TP_STRUCT__entry(
- __field( unsigned int, cpu )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- ),
-
- TP_printk("find_best: energy candidate cpu %d\n", __entry->cpu)
-);
-
-TRACE_EVENT(ehmp_find_best_target_cpu,
-
- TP_PROTO(unsigned int cpu, unsigned long target_util),
-
- TP_ARGS(cpu, target_util),
-
- TP_STRUCT__entry(
- __field( unsigned int, cpu )
- __field( unsigned long, target_util )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->target_util = target_util;
- ),
-
- TP_printk("find_best: target_cpu %d, target_util %lu\n", __entry->cpu, __entry->target_util)
-);
-
-/*
- * Tracepoint for ontime migration
- */
-TRACE_EVENT(ehmp_ontime_migration,
-
- TP_PROTO(struct task_struct *p, unsigned long load,
- int src_cpu, int dst_cpu, int boost_migration),
-
- TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( unsigned long, load )
- __field( int, src_cpu )
- __field( int, dst_cpu )
- __field( int, bm )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->load = load;
- __entry->src_cpu = src_cpu;
- __entry->dst_cpu = dst_cpu;
- __entry->bm = boost_migration;
- ),
-
- TP_printk("comm=%s pid=%d ontime_load_avg=%lu src_cpu=%d dst_cpu=%d boost_migration=%d",
- __entry->comm, __entry->pid, __entry->load,
- __entry->src_cpu, __entry->dst_cpu, __entry->bm)
-);
-
-/*
- * Tracepoint for accounting ontime load averages for tasks.
- */
-TRACE_EVENT(ehmp_ontime_new_entity_load,
-
- TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),
-
- TP_ARGS(tsk, avg),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( unsigned long, load_avg )
- __field( u64, load_sum )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->cpu = task_cpu(tsk);
- __entry->load_avg = avg->load_avg;
- __entry->load_sum = avg->load_sum;
- ),
- TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu",
- __entry->comm,
- __entry->pid,
- __entry->cpu,
- __entry->load_avg,
- (u64)__entry->load_sum)
-);
-
-/*
- * Tracepoint for accounting ontime load averages for tasks.
- */
-TRACE_EVENT(ehmp_ontime_load_avg_task,
-
- TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),
-
- TP_ARGS(tsk, avg, ontime_flag),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( unsigned long, load_avg )
- __field( u64, load_sum )
- __field( int, ontime_flag )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->cpu = task_cpu(tsk);
- __entry->load_avg = avg->load_avg;
- __entry->load_sum = avg->load_sum;
- __entry->ontime_flag = ontime_flag;
- ),
- TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu ontime_flag=%d",
- __entry->comm, __entry->pid, __entry->cpu, __entry->load_avg,
- (u64)__entry->load_sum, __entry->ontime_flag)
-);
-
-TRACE_EVENT(ehmp_ontime_check_migrate,
-
- TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),
-
- TP_ARGS(tsk, cpu, migrate, label),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( int, migrate )
- __array( char, label, 64 )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->cpu = cpu;
- __entry->migrate = migrate;
- strncpy(__entry->label, label, 64);
- ),
-
- TP_printk("comm=%s pid=%d target_cpu=%d migrate=%d reason=%s",
- __entry->comm, __entry->pid, __entry->cpu,
- __entry->migrate, __entry->label)
-);
-
-TRACE_EVENT(ehmp_ontime_task_wakeup,
-
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
-
- TP_ARGS(tsk, src_cpu, dst_cpu, label),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, src_cpu )
- __field( int, dst_cpu )
- __array( char, label, 64 )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->src_cpu = src_cpu;
- __entry->dst_cpu = dst_cpu;
- strncpy(__entry->label, label, 64);
- ),
-
- TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s",
- __entry->comm, __entry->pid, __entry->src_cpu,
- __entry->dst_cpu, __entry->label)
-);
-
-TRACE_EVENT(ehmp_lbt_overutilized,
-
- TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
-
- TP_ARGS(cpu, level, util, capacity, overutilized),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, level )
- __field( unsigned long, util )
- __field( unsigned long, capacity )
- __field( bool, overutilized )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->level = level;
- __entry->util = util;
- __entry->capacity = capacity;
- __entry->overutilized = overutilized;
- ),
-
- TP_printk("cpu=%d level=%d util=%lu capacity=%lu overutilized=%d",
- __entry->cpu, __entry->level, __entry->util,
- __entry->capacity, __entry->overutilized)
-);
-
-#endif /* _TRACE_EHMP_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
--- /dev/null
+/*
+ * Copyright (C) 2017 Park Bumgyu <bumgyu.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ems
+
+#if !defined(_TRACE_EMS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EMS_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for selection of boost cpu
+ */
+TRACE_EVENT(ehmp_select_boost_cpu,
+
+ TP_PROTO(struct task_struct *p, int cpu, int trigger, char *state),
+
+ TP_ARGS(p, cpu, trigger, state),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( int, trigger )
+ __array( char, state, 64 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->cpu = cpu;
+ __entry->trigger = trigger;
+ strncpy(__entry->state, state, 64);
+ ),
+
+ TP_printk("comm=%s pid=%d target_cpu=%d trigger=%d state=%s",
+ __entry->comm, __entry->pid, __entry->cpu,
+ __entry->trigger, __entry->state)
+);
+
+/*
+ * Tracepoint for selection of group balancer
+ */
+TRACE_EVENT(ehmp_select_group_boost,
+
+ TP_PROTO(struct task_struct *p, int cpu, char *state),
+
+ TP_ARGS(p, cpu, state),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __array( char, state, 64 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->cpu = cpu;
+ strncpy(__entry->state, state, 64);
+ ),
+
+ TP_printk("comm=%s pid=%d target_cpu=%d state=%s",
+ __entry->comm, __entry->pid, __entry->cpu, __entry->state)
+);
+
+TRACE_EVENT(ehmp_global_boost,
+
+ TP_PROTO(char *name, unsigned long boost),
+
+ TP_ARGS(name, boost),
+
+ TP_STRUCT__entry(
+ __array( char, name, 64 )
+ __field( unsigned long, boost )
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 64);
+ __entry->boost = boost;
+ ),
+
+ TP_printk("name=%s global_boost_value=%ld", __entry->name, __entry->boost)
+);
+
+/*
+ * Tracepoint for prefer idle
+ */
+TRACE_EVENT(ehmp_prefer_idle,
+
+ TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu,
+ unsigned long task_util, unsigned long new_util, int idle),
+
+ TP_ARGS(p, orig_cpu, target_cpu, task_util, new_util, idle),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, orig_cpu )
+ __field( int, target_cpu )
+ __field( unsigned long, task_util )
+ __field( unsigned long, new_util )
+ __field( int, idle )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->orig_cpu = orig_cpu;
+ __entry->target_cpu = target_cpu;
+ __entry->task_util = task_util;
+ __entry->new_util = new_util;
+ __entry->idle = idle;
+ ),
+
+ TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d task_util=%lu new_util=%lu idle=%d",
+ __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu,
+ __entry->task_util, __entry->new_util, __entry->idle)
+);
+
+TRACE_EVENT(ehmp_prefer_idle_cpu_select,
+
+ TP_PROTO(struct task_struct *p, int cpu),
+
+ TP_ARGS(p, cpu),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("comm=%s pid=%d target_cpu=%d",
+ __entry->comm, __entry->pid, __entry->cpu)
+);
+
+/*
+ * Tracepoint for cpu selection
+ */
+TRACE_EVENT(ehmp_find_best_target_stat,
+
+ TP_PROTO(int cpu, unsigned long cap, unsigned long util, unsigned long target_util),
+
+ TP_ARGS(cpu, cap, util, target_util),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned long, cap )
+ __field( unsigned long, util )
+ __field( unsigned long, target_util )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->cap = cap;
+ __entry->util = util;
+ __entry->target_util = target_util;
+ ),
+
+ TP_printk("find_best : [cpu%d] capacity %lu, util %lu, target_util %lu\n",
+ __entry->cpu, __entry->cap, __entry->util, __entry->target_util)
+);
+
+TRACE_EVENT(ehmp_find_best_target_candi,
+
+ TP_PROTO(unsigned int cpu),
+
+ TP_ARGS(cpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("find_best: energy candidate cpu %d\n", __entry->cpu)
+);
+
+TRACE_EVENT(ehmp_find_best_target_cpu,
+
+ TP_PROTO(unsigned int cpu, unsigned long target_util),
+
+ TP_ARGS(cpu, target_util),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, cpu )
+ __field( unsigned long, target_util )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->target_util = target_util;
+ ),
+
+ TP_printk("find_best: target_cpu %d, target_util %lu\n", __entry->cpu, __entry->target_util)
+);
+
+/*
+ * Tracepoint for ontime migration
+ */
+TRACE_EVENT(ehmp_ontime_migration,
+
+ TP_PROTO(struct task_struct *p, unsigned long load,
+ int src_cpu, int dst_cpu, int boost_migration),
+
+ TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( unsigned long, load )
+ __field( int, src_cpu )
+ __field( int, dst_cpu )
+ __field( int, bm )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->load = load;
+ __entry->src_cpu = src_cpu;
+ __entry->dst_cpu = dst_cpu;
+ __entry->bm = boost_migration;
+ ),
+
+ TP_printk("comm=%s pid=%d ontime_load_avg=%lu src_cpu=%d dst_cpu=%d boost_migration=%d",
+ __entry->comm, __entry->pid, __entry->load,
+ __entry->src_cpu, __entry->dst_cpu, __entry->bm)
+);
+
+/*
+ * Tracepoint for accounting ontime load averages for tasks.
+ */
+TRACE_EVENT(ehmp_ontime_new_entity_load,
+
+ TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg),
+
+ TP_ARGS(tsk, avg),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( u64, load_sum )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->load_avg = avg->load_avg;
+ __entry->load_sum = avg->load_sum;
+ ),
+ TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu",
+ __entry->comm,
+ __entry->pid,
+ __entry->cpu,
+ __entry->load_avg,
+ (u64)__entry->load_sum)
+);
+
+/*
+ * Tracepoint for accounting ontime load averages for tasks.
+ */
+TRACE_EVENT(ehmp_ontime_load_avg_task,
+
+ TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag),
+
+ TP_ARGS(tsk, avg, ontime_flag),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( u64, load_sum )
+ __field( int, ontime_flag )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->load_avg = avg->load_avg;
+ __entry->load_sum = avg->load_sum;
+ __entry->ontime_flag = ontime_flag;
+ ),
+ TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu ontime_flag=%d",
+ __entry->comm, __entry->pid, __entry->cpu, __entry->load_avg,
+ (u64)__entry->load_sum, __entry->ontime_flag)
+);
+
+TRACE_EVENT(ehmp_ontime_check_migrate,
+
+ TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label),
+
+ TP_ARGS(tsk, cpu, migrate, label),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( int, migrate )
+ __array( char, label, 64 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = cpu;
+ __entry->migrate = migrate;
+ strncpy(__entry->label, label, 64);
+ ),
+
+ TP_printk("comm=%s pid=%d target_cpu=%d migrate=%d reason=%s",
+ __entry->comm, __entry->pid, __entry->cpu,
+ __entry->migrate, __entry->label)
+);
+
+TRACE_EVENT(ehmp_ontime_task_wakeup,
+
+ TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label),
+
+ TP_ARGS(tsk, src_cpu, dst_cpu, label),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, src_cpu )
+ __field( int, dst_cpu )
+ __array( char, label, 64 )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->src_cpu = src_cpu;
+ __entry->dst_cpu = dst_cpu;
+ strncpy(__entry->label, label, 64);
+ ),
+
+ TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s",
+ __entry->comm, __entry->pid, __entry->src_cpu,
+ __entry->dst_cpu, __entry->label)
+);
+
+TRACE_EVENT(ehmp_lbt_overutilized,
+
+ TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized),
+
+ TP_ARGS(cpu, level, util, capacity, overutilized),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( int, level )
+ __field( unsigned long, util )
+ __field( unsigned long, capacity )
+ __field( bool, overutilized )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->level = level;
+ __entry->util = util;
+ __entry->capacity = capacity;
+ __entry->overutilized = overutilized;
+ ),
+
+ TP_printk("cpu=%d level=%d util=%lu capacity=%lu overutilized=%d",
+ __entry->cpu, __entry->level, __entry->util,
+ __entry->capacity, __entry->overutilized)
+);
+
+#endif /* _TRACE_EMS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
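Each TRACE_EVENT above expands to a trace_<name>() function that the scheduler calls at the instrumented point, for example:

	trace_ehmp_ontime_migration(p, load, src_cpu, dst_cpu,
				    boost_migration);

Note that with TRACE_SYSTEM changed from ehmp to ems, the tracefs paths move from events/ehmp/ to events/ems/ (e.g. /sys/kernel/debug/tracing/events/ems/ehmp_ontime_migration/enable), so tooling that enables these events by path needs the new directory even though the event names themselves keep their ehmp_ prefix.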
If unsure, say N.
-config SCHED_EHMP
- bool "Exynos scheduler for Heterogeneous Multi-Processor"
+config SCHED_EMS
+ bool "Exynos Mobile Scheduler"
depends on SMP
help
- This option supports Exynos scheduler for HMP architecture. It is
- designed to secure the limits of energy aware scheduler. This option
- provides features such as independent boosting functinos such as
- global boost and on-time migration, and prefer_perf and enhanced
- prefer_idle that work in conjunction with SCHEDTUNE.
+ This option supports the Exynos Mobile Scheduler (EMS). It is designed
+ to address the limitations of the energy-aware scheduler and provides
+ independent boosting functions such as on-time migration, as well as
+ prefer_perf and an enhanced prefer_idle that work in conjunction with
+ SCHEDTUNE.
If unsure, say N.
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_MEMBARRIER) += membarrier.o
obj-$(CONFIG_PSI) += psi.o
-obj-$(CONFIG_SCHED_EHMP) += ems/
+obj-$(CONFIG_SCHED_EMS) += ems/
-obj-$(CONFIG_SCHED_EHMP) += ehmp.o
+obj-$(CONFIG_SCHED_EMS) += ehmp.o
obj-$(CONFIG_FREQVAR_TUNE) += freqvar_tune.o
#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
-#include <linux/ehmp.h>
+#include <linux/ems.h>
#include <linux/sched_energy.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/ehmp.h>
+#include <trace/events/ems.h>
#include "../sched.h"
#include "../tune.h"
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>
-#include <linux/ehmp.h>
+#include <linux/ems.h>
#include <trace/events/sched.h>
long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
- if (sched_feat(EXYNOS_HMP)) {
+ if (sched_feat(EXYNOS_MS)) {
exynos_init_entity_util_avg(se);
goto util_init_done;
}
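For context: the surrounding code initializes a new entity's utilization from the CPU's spare capacity (with cpu_scale = 1024 and a fully idle rq, cap = (1024 - 0) / 2 = 512), and EXYNOS_MS bypasses that default so exynos_init_entity_util_avg() can apply EMS's own initial-utilization policy.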
rcu_read_lock();
sd = rcu_dereference(rq->sd);
if (sd && !sd_overutilized(sd)) {
- if (sched_feat(EXYNOS_HMP))
+ if (sched_feat(EXYNOS_MS))
overutilized = lbt_overutilized(rq->cpu, sd->level);
else
overutilized = cpu_overutilized(rq->cpu);
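For comparison, the single-threshold upstream check that lbt_overutilized() replaces looks roughly like this in EAS kernels of this vintage (capacity_margin being ~1280/1024, i.e. 25% headroom):

	static inline bool cpu_overutilized(int cpu)
	{
		return (capacity_of(cpu) * 1024) <
		       (cpu_util(cpu) * capacity_margin);
	}

lbt_overutilized() generalizes this by keeping a threshold per sched-domain level, so a big.LITTLE system can trigger intra-cluster and inter-cluster balancing at different utilization ratios.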
* after moving, the previous cpu/cluster can be powered down,
* so this should be considered when idle power is calculated.
*/
- if (sched_feat(EXYNOS_HMP)) {
+ if (sched_feat(EXYNOS_MS)) {
new_state = exynos_estimate_idle_state(cpu_idx, sched_group_span(sg),
max_idle_state_idx, sg->group_weight);
if (new_state)
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
- if (sched_feat(EXYNOS_HMP)) {
+ if (sched_feat(EXYNOS_MS)) {
idlest = exynos_fit_idlest_group(sd, p);
if (idlest)
return idlest;
*overload = 1;
}
-
if (sched_feat(EXYNOS_MS)) {
if (lbt_overutilized(i, env->sd->level)) {
*overutilized = true;
return 1;
}
- if (sched_feat(EXYNOS_HMP))
+ if (sched_feat(EXYNOS_MS))
return exynos_need_active_balance(env->idle, sd, env->src_cpu, env->dst_cpu);
/*
SCHED_FEAT(FIND_BEST_TARGET, true)
SCHED_FEAT(FBT_STRICT_ORDER, true)
-#ifdef CONFIG_SCHED_EHMP
-SCHED_FEAT(EXYNOS_HMP, true)
-#else
-SCHED_FEAT(EXYNOS_HMP, false)
-#endif
+SCHED_FEAT(EXYNOS_MS, true)
+
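With the #ifdef gone, EXYNOS_MS is now always built in and defaults to true even when CONFIG_SCHED_EMS is off (the !CONFIG_SCHED_EMS stubs in ems.h keep that path harmless); with CONFIG_SCHED_DEBUG enabled it can still be disabled at runtime by writing NO_EXYNOS_MS to /sys/kernel/debug/sched_features.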
/*
* Apply schedtune boost hold to tasks of all sched classes.
* If enabled, schedtune will hold the boost applied to a CPU
u64 cum_window_demand;
#endif /* CONFIG_SCHED_WALT */
-#ifdef CONFIG_SCHED_EHMP
+#ifdef CONFIG_SCHED_EMS
bool ontime_migrating;
#endif
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
-#include <linux/ehmp.h>
+#include <linux/ems.h>
#include <trace/events/sched.h>
return prefer_idle;
}
-#ifdef CONFIG_SCHED_EHMP
+#ifdef CONFIG_SCHED_EMS
static atomic_t kernel_prefer_perf_req[BOOSTGROUPS_COUNT];
int kernel_prefer_perf(int grp_idx)
{