From: Park Bumgyu Date: Thu, 22 Mar 2018 05:43:33 +0000 (+0900) Subject: sched: change exynos scheduler name from EHMP to EMS X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=196711ecb908db3d51ef5f6cfbfeb0b397f6598c;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git sched: change exynos scheduler name from EHMP to EMS Rename Exynos scheduler. Existing EHMP(Exynos HMP) was a scheduler considering only HMP chipset. EMS(Exynos Mobile Scheduler) will support all chipsets regardless of cluster and core configuration. Change-Id: I2802ddcd9e401a0d92f9c98656b5e591d429d6ce Signed-off-by: Park Bumgyu --- diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 8c409edac140..efe920aa0df9 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -190,7 +190,6 @@ config CPU_FREQ_GOV_SCHEDUTIL depends on CPU_FREQ && SMP select CPU_FREQ_GOV_ATTR_SET select IRQ_WORK - select FREQVAR_TUNE help This governor makes decisions based on the utilization data provided by the scheduler. It sets the CPU frequency to be proportional to @@ -205,7 +204,7 @@ config CPU_FREQ_GOV_SCHEDUTIL config FREQVAR_TUNE bool "CPU frequency variant tuner" - depends on CPU_FREQ_GOV_SCHEDUTIL + depends on SCHED_EMS && CPU_FREQ_GOV_SCHEDUTIL help This option provides the controller which tunes system performance as frequency variant. diff --git a/include/linux/ehmp.h b/include/linux/ehmp.h deleted file mode 100644 index 5633347a4f37..000000000000 --- a/include/linux/ehmp.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2017 Samsung Electronics Co., Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - */ - -#include -#include - -#ifdef CONFIG_SCHED_TUNE -enum stune_group { - STUNE_ROOT, - STUNE_FOREGROUND, - STUNE_BACKGROUND, - STUNE_TOPAPP, - STUNE_GROUP_COUNT, -}; -#endif - -struct gb_qos_request { - struct plist_node node; - char *name; - bool active; -}; - -#ifdef CONFIG_SCHED_EHMP -extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask, - int state, int cpus); -extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd, - struct task_struct *p); -extern void exynos_init_entity_util_avg(struct sched_entity *se); -extern int exynos_need_active_balance(enum cpu_idle_type idle, - struct sched_domain *sd, int src_cpu, int dst_cpu); - -extern unsigned long global_boost(void); -extern int find_second_max_cap(void); - -extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu, - bool boosted, bool prefer_idle); - -extern void ontime_migration(void); -extern int ontime_can_migration(struct task_struct *p, int cpu); -extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, - struct sched_avg *sa); -extern void ontime_new_entity_load(struct task_struct *parent, - struct sched_entity *se); -extern void ontime_trace_task_info(struct task_struct *p); -extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val); - -extern bool lbt_overutilized(int cpu, int level); -extern void update_lbt_overutil(int cpu, unsigned long capacity); - -extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value); - -extern void request_kernel_prefer_perf(int grp_idx, int enable); -#else -static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask, - int state, int cpus) { return 0; } -static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd, - struct task_struct *p) { return NULL; } -static inline void exynos_init_entity_util_avg(struct sched_entity *se) { } -static inline int 
exynos_need_active_balance(enum cpu_idle_type idle, - struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; } - -static inline unsigned long global_boost(void) { return 0; } -static inline int find_second_max_cap(void) { return -EINVAL; } - -static inline int exynos_select_cpu(struct task_struct *p, int prev_cpu, - int sync, int sd_flag) { return -EINVAL; } - -static inline void ontime_migration(void) { } -static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; } -static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, - struct sched_avg *sa) { } -static inline void ontime_new_entity_load(struct task_struct *p, - struct sched_entity *se) { } -static inline void ontime_trace_task_info(struct task_struct *p) { } - -static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { } - -static inline bool lbt_overutilized(int cpu, int level) { return false; } -static inline void update_lbt_overutil(int cpu, unsigned long capacity) { } - -static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { } - -//extern void request_kernel_prefer_perf(int grp_idx, int enable) { } -#endif /* CONFIG_SCHED_EHMP */ diff --git a/include/linux/ems.h b/include/linux/ems.h new file mode 100644 index 000000000000..d019bbec826a --- /dev/null +++ b/include/linux/ems.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2017 Samsung Electronics Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#ifdef CONFIG_SCHED_TUNE +enum stune_group { + STUNE_ROOT, + STUNE_FOREGROUND, + STUNE_BACKGROUND, + STUNE_TOPAPP, + STUNE_GROUP_COUNT, +}; +#endif + +struct gb_qos_request { + struct plist_node node; + char *name; + bool active; +}; + +#ifdef CONFIG_SCHED_EMS +extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask, + int state, int cpus); +extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd, + struct task_struct *p); +extern void exynos_init_entity_util_avg(struct sched_entity *se); +extern int exynos_need_active_balance(enum cpu_idle_type idle, + struct sched_domain *sd, int src_cpu, int dst_cpu); + +extern unsigned long global_boost(void); +extern int find_second_max_cap(void); + +extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu, + bool boosted, bool prefer_idle); + +extern void ontime_migration(void); +extern int ontime_can_migration(struct task_struct *p, int cpu); +extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, + struct sched_avg *sa); +extern void ontime_new_entity_load(struct task_struct *parent, + struct sched_entity *se); +extern void ontime_trace_task_info(struct task_struct *p); +extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val); + +extern bool lbt_overutilized(int cpu, int level); +extern void update_lbt_overutil(int cpu, unsigned long capacity); + +extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value); + +extern void request_kernel_prefer_perf(int grp_idx, int enable); +#else +static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask, + int state, int cpus) { return 0; } +static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd, + struct task_struct *p) { return NULL; } +static inline void exynos_init_entity_util_avg(struct sched_entity *se) { } +static inline int exynos_need_active_balance(enum cpu_idle_type idle, + struct sched_domain *sd, int 
src_cpu, int dst_cpu) { return 0; } + +static inline unsigned long global_boost(void) { return 0; } +static inline int find_second_max_cap(void) { return -EINVAL; } + +static inline int exynos_select_cpu(struct task_struct *p, int *backup_cpu, + bool boosted, bool prefer_idle) { return -EINVAL; } + +static inline void ontime_migration(void) { } +static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; } +static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, + struct sched_avg *sa) { } +static inline void ontime_new_entity_load(struct task_struct *p, + struct sched_entity *se) { } +static inline void ontime_trace_task_info(struct task_struct *p) { } + +static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { } + +static inline bool lbt_overutilized(int cpu, int level) { return false; } +static inline void update_lbt_overutil(int cpu, unsigned long capacity) { } + +static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { } + +static inline void request_kernel_prefer_perf(int grp_idx, int enable) { } +#endif /* CONFIG_SCHED_EMS */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 03caa8b1cd4f..7d11d7455566 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -347,7 +347,6 @@ struct sched_avg { unsigned long util_avg; }; -#ifdef CONFIG_SCHED_EHMP #define NOT_ONTIME 1 #define ONTIME_MIGRATING 2 #define ONTIME 4 @@ -366,7 +365,6 @@ struct ontime_entity { int flags; int cpu; }; -#endif struct sched_statistics { #ifdef CONFIG_SCHEDSTATS @@ -438,9 +436,7 @@ struct sched_entity { */ struct sched_avg avg ____cacheline_aligned_in_smp; #endif -#ifdef CONFIG_SCHED_EHMP struct ontime_entity ontime; -#endif }; #ifdef CONFIG_SCHED_WALT diff --git a/include/trace/events/ehmp.h b/include/trace/events/ehmp.h deleted file mode 100644 index 6c9878a9524e..000000000000 --- a/include/trace/events/ehmp.h +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Copyright (C) 
2017 Park Bumgyu - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM ehmp - -#if !defined(_TRACE_EHMP_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_EHMP_H - -#include -#include - -/* - * Tracepoint for selection of boost cpu - */ -TRACE_EVENT(ehmp_select_boost_cpu, - - TP_PROTO(struct task_struct *p, int cpu, int trigger, char *state), - - TP_ARGS(p, cpu, trigger, state), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, cpu ) - __field( int, trigger ) - __array( char, state, 64 ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->cpu = cpu; - __entry->trigger = trigger; - memcpy(__entry->state, state, 64); - ), - - TP_printk("comm=%s pid=%d target_cpu=%d trigger=%d state=%s", - __entry->comm, __entry->pid, __entry->cpu, - __entry->trigger, __entry->state) -); - -/* - * Tracepoint for selection of group balancer - */ -TRACE_EVENT(ehmp_select_group_boost, - - TP_PROTO(struct task_struct *p, int cpu, char *state), - - TP_ARGS(p, cpu, state), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, cpu ) - __array( char, state, 64 ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->cpu = cpu; - memcpy(__entry->state, state, 64); - ), - - TP_printk("comm=%s pid=%d target_cpu=%d state=%s", - __entry->comm, __entry->pid, __entry->cpu, __entry->state) -); - -TRACE_EVENT(ehmp_global_boost, - - TP_PROTO(char *name, unsigned long boost), - - TP_ARGS(name, boost), - - TP_STRUCT__entry( - __array( char, name, 64 ) - __field( unsigned long, boost ) - ), - - TP_fast_assign( - memcpy(__entry->name, name, 64); - __entry->boost = boost; - ), - - TP_printk("name=%s 
global_boost_value=%ld", __entry->name, __entry->boost) -); - -/* - * Tracepoint for prefer idle - */ -TRACE_EVENT(ehmp_prefer_idle, - - TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu, - unsigned long task_util, unsigned long new_util, int idle), - - TP_ARGS(p, orig_cpu, target_cpu, task_util, new_util, idle), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, orig_cpu ) - __field( int, target_cpu ) - __field( unsigned long, task_util ) - __field( unsigned long, new_util ) - __field( int, idle ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->orig_cpu = orig_cpu; - __entry->target_cpu = target_cpu; - __entry->task_util = task_util; - __entry->new_util = new_util; - __entry->idle = idle; - ), - - TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d task_util=%lu new_util=%lu idle=%d", - __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu, - __entry->task_util, __entry->new_util, __entry->idle) -); - -TRACE_EVENT(ehmp_prefer_idle_cpu_select, - - TP_PROTO(struct task_struct *p, int cpu), - - TP_ARGS(p, cpu), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, cpu ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->cpu = cpu; - ), - - TP_printk("comm=%s pid=%d target_cpu=%d", - __entry->comm, __entry->pid, __entry->cpu) -); - -/* - * Tracepoint for cpu selection - */ -TRACE_EVENT(ehmp_find_best_target_stat, - - TP_PROTO(int cpu, unsigned long cap, unsigned long util, unsigned long target_util), - - TP_ARGS(cpu, cap, util, target_util), - - TP_STRUCT__entry( - __field( int, cpu ) - __field( unsigned long, cap ) - __field( unsigned long, util ) - __field( unsigned long, target_util ) - ), - - TP_fast_assign( - __entry->cpu = cpu; - __entry->cap = cap; - __entry->util = util; - __entry->target_util = target_util; - ), - - 
TP_printk("find_best : [cpu%d] capacity %lu, util %lu, target_util %lu\n", - __entry->cpu, __entry->cap, __entry->util, __entry->target_util) -); - -TRACE_EVENT(ehmp_find_best_target_candi, - - TP_PROTO(unsigned int cpu), - - TP_ARGS(cpu), - - TP_STRUCT__entry( - __field( unsigned int, cpu ) - ), - - TP_fast_assign( - __entry->cpu = cpu; - ), - - TP_printk("find_best: energy candidate cpu %d\n", __entry->cpu) -); - -TRACE_EVENT(ehmp_find_best_target_cpu, - - TP_PROTO(unsigned int cpu, unsigned long target_util), - - TP_ARGS(cpu, target_util), - - TP_STRUCT__entry( - __field( unsigned int, cpu ) - __field( unsigned long, target_util ) - ), - - TP_fast_assign( - __entry->cpu = cpu; - __entry->target_util = target_util; - ), - - TP_printk("find_best: target_cpu %d, target_util %lu\n", __entry->cpu, __entry->target_util) -); - -/* - * Tracepoint for ontime migration - */ -TRACE_EVENT(ehmp_ontime_migration, - - TP_PROTO(struct task_struct *p, unsigned long load, - int src_cpu, int dst_cpu, int boost_migration), - - TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( unsigned long, load ) - __field( int, src_cpu ) - __field( int, dst_cpu ) - __field( int, bm ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->load = load; - __entry->src_cpu = src_cpu; - __entry->dst_cpu = dst_cpu; - __entry->bm = boost_migration; - ), - - TP_printk("comm=%s pid=%d ontime_load_avg=%lu src_cpu=%d dst_cpu=%d boost_migration=%d", - __entry->comm, __entry->pid, __entry->load, - __entry->src_cpu, __entry->dst_cpu, __entry->bm) -); - -/* - * Tracepoint for accounting ontime load averages for tasks. 
- */ -TRACE_EVENT(ehmp_ontime_new_entity_load, - - TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg), - - TP_ARGS(tsk, avg), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, cpu ) - __field( unsigned long, load_avg ) - __field( u64, load_sum ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->cpu = task_cpu(tsk); - __entry->load_avg = avg->load_avg; - __entry->load_sum = avg->load_sum; - ), - TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu", - __entry->comm, - __entry->pid, - __entry->cpu, - __entry->load_avg, - (u64)__entry->load_sum) -); - -/* - * Tracepoint for accounting ontime load averages for tasks. - */ -TRACE_EVENT(ehmp_ontime_load_avg_task, - - TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag), - - TP_ARGS(tsk, avg, ontime_flag), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, cpu ) - __field( unsigned long, load_avg ) - __field( u64, load_sum ) - __field( int, ontime_flag ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->cpu = task_cpu(tsk); - __entry->load_avg = avg->load_avg; - __entry->load_sum = avg->load_sum; - __entry->ontime_flag = ontime_flag; - ), - TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu ontime_flag=%d", - __entry->comm, __entry->pid, __entry->cpu, __entry->load_avg, - (u64)__entry->load_sum, __entry->ontime_flag) -); - -TRACE_EVENT(ehmp_ontime_check_migrate, - - TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label), - - TP_ARGS(tsk, cpu, migrate, label), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, cpu ) - __field( int, migrate ) - __array( char, label, 64 ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->cpu = 
cpu; - __entry->migrate = migrate; - strncpy(__entry->label, label, 64); - ), - - TP_printk("comm=%s pid=%d target_cpu=%d migrate=%d reason=%s", - __entry->comm, __entry->pid, __entry->cpu, - __entry->migrate, __entry->label) -); - -TRACE_EVENT(ehmp_ontime_task_wakeup, - - TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label), - - TP_ARGS(tsk, src_cpu, dst_cpu, label), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, src_cpu ) - __field( int, dst_cpu ) - __array( char, label, 64 ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->src_cpu = src_cpu; - __entry->dst_cpu = dst_cpu; - strncpy(__entry->label, label, 64); - ), - - TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s", - __entry->comm, __entry->pid, __entry->src_cpu, - __entry->dst_cpu, __entry->label) -); - -TRACE_EVENT(ehmp_lbt_overutilized, - - TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized), - - TP_ARGS(cpu, level, util, capacity, overutilized), - - TP_STRUCT__entry( - __field( int, cpu ) - __field( int, level ) - __field( unsigned long, util ) - __field( unsigned long, capacity ) - __field( bool, overutilized ) - ), - - TP_fast_assign( - __entry->cpu = cpu; - __entry->level = level; - __entry->util = util; - __entry->capacity = capacity; - __entry->overutilized = overutilized; - ), - - TP_printk("cpu=%d level=%d util=%lu capacity=%lu overutilized=%d", - __entry->cpu, __entry->level, __entry->util, - __entry->capacity, __entry->overutilized) -); - -#endif /* _TRACE_EHMP_H */ - -/* This part must be outside protection */ -#include diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h new file mode 100644 index 000000000000..49598ef75624 --- /dev/null +++ b/include/trace/events/ems.h @@ -0,0 +1,394 @@ +/* + * Copyright (C) 2017 Park Bumgyu + * + * This program is free software; you can redistribute it and/or 
modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ems + +#if !defined(_TRACE_EMS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EMS_H + +#include +#include + +/* + * Tracepoint for selection of boost cpu + */ +TRACE_EVENT(ehmp_select_boost_cpu, + + TP_PROTO(struct task_struct *p, int cpu, int trigger, char *state), + + TP_ARGS(p, cpu, trigger, state), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, cpu ) + __field( int, trigger ) + __array( char, state, 64 ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->cpu = cpu; + __entry->trigger = trigger; + memcpy(__entry->state, state, 64); + ), + + TP_printk("comm=%s pid=%d target_cpu=%d trigger=%d state=%s", + __entry->comm, __entry->pid, __entry->cpu, + __entry->trigger, __entry->state) +); + +/* + * Tracepoint for selection of group balancer + */ +TRACE_EVENT(ehmp_select_group_boost, + + TP_PROTO(struct task_struct *p, int cpu, char *state), + + TP_ARGS(p, cpu, state), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, cpu ) + __array( char, state, 64 ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->cpu = cpu; + memcpy(__entry->state, state, 64); + ), + + TP_printk("comm=%s pid=%d target_cpu=%d state=%s", + __entry->comm, __entry->pid, __entry->cpu, __entry->state) +); + +TRACE_EVENT(ehmp_global_boost, + + TP_PROTO(char *name, unsigned long boost), + + TP_ARGS(name, boost), + + TP_STRUCT__entry( + __array( char, name, 64 ) + __field( unsigned long, boost ) + ), + + TP_fast_assign( + memcpy(__entry->name, name, 64); + __entry->boost = boost; + ), + + TP_printk("name=%s global_boost_value=%ld", __entry->name, __entry->boost) +); + +/* + * Tracepoint for prefer idle + 
*/ +TRACE_EVENT(ehmp_prefer_idle, + + TP_PROTO(struct task_struct *p, int orig_cpu, int target_cpu, + unsigned long task_util, unsigned long new_util, int idle), + + TP_ARGS(p, orig_cpu, target_cpu, task_util, new_util, idle), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, orig_cpu ) + __field( int, target_cpu ) + __field( unsigned long, task_util ) + __field( unsigned long, new_util ) + __field( int, idle ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->orig_cpu = orig_cpu; + __entry->target_cpu = target_cpu; + __entry->task_util = task_util; + __entry->new_util = new_util; + __entry->idle = idle; + ), + + TP_printk("comm=%s pid=%d orig_cpu=%d target_cpu=%d task_util=%lu new_util=%lu idle=%d", + __entry->comm, __entry->pid, __entry->orig_cpu, __entry->target_cpu, + __entry->task_util, __entry->new_util, __entry->idle) +); + +TRACE_EVENT(ehmp_prefer_idle_cpu_select, + + TP_PROTO(struct task_struct *p, int cpu), + + TP_ARGS(p, cpu), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, cpu ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->cpu = cpu; + ), + + TP_printk("comm=%s pid=%d target_cpu=%d", + __entry->comm, __entry->pid, __entry->cpu) +); + +/* + * Tracepoint for cpu selection + */ +TRACE_EVENT(ehmp_find_best_target_stat, + + TP_PROTO(int cpu, unsigned long cap, unsigned long util, unsigned long target_util), + + TP_ARGS(cpu, cap, util, target_util), + + TP_STRUCT__entry( + __field( int, cpu ) + __field( unsigned long, cap ) + __field( unsigned long, util ) + __field( unsigned long, target_util ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->cap = cap; + __entry->util = util; + __entry->target_util = target_util; + ), + + TP_printk("find_best : [cpu%d] capacity %lu, util %lu, target_util %lu\n", + __entry->cpu, 
__entry->cap, __entry->util, __entry->target_util) +); + +TRACE_EVENT(ehmp_find_best_target_candi, + + TP_PROTO(unsigned int cpu), + + TP_ARGS(cpu), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + ), + + TP_printk("find_best: energy candidate cpu %d\n", __entry->cpu) +); + +TRACE_EVENT(ehmp_find_best_target_cpu, + + TP_PROTO(unsigned int cpu, unsigned long target_util), + + TP_ARGS(cpu, target_util), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + __field( unsigned long, target_util ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->target_util = target_util; + ), + + TP_printk("find_best: target_cpu %d, target_util %lu\n", __entry->cpu, __entry->target_util) +); + +/* + * Tracepoint for ontime migration + */ +TRACE_EVENT(ehmp_ontime_migration, + + TP_PROTO(struct task_struct *p, unsigned long load, + int src_cpu, int dst_cpu, int boost_migration), + + TP_ARGS(p, load, src_cpu, dst_cpu, boost_migration), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( unsigned long, load ) + __field( int, src_cpu ) + __field( int, dst_cpu ) + __field( int, bm ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->load = load; + __entry->src_cpu = src_cpu; + __entry->dst_cpu = dst_cpu; + __entry->bm = boost_migration; + ), + + TP_printk("comm=%s pid=%d ontime_load_avg=%lu src_cpu=%d dst_cpu=%d boost_migration=%d", + __entry->comm, __entry->pid, __entry->load, + __entry->src_cpu, __entry->dst_cpu, __entry->bm) +); + +/* + * Tracepoint for accounting ontime load averages for tasks. 
+ */ +TRACE_EVENT(ehmp_ontime_new_entity_load, + + TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg), + + TP_ARGS(tsk, avg), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, cpu ) + __field( unsigned long, load_avg ) + __field( u64, load_sum ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->cpu = task_cpu(tsk); + __entry->load_avg = avg->load_avg; + __entry->load_sum = avg->load_sum; + ), + TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu", + __entry->comm, + __entry->pid, + __entry->cpu, + __entry->load_avg, + (u64)__entry->load_sum) +); + +/* + * Tracepoint for accounting ontime load averages for tasks. + */ +TRACE_EVENT(ehmp_ontime_load_avg_task, + + TP_PROTO(struct task_struct *tsk, struct ontime_avg *avg, int ontime_flag), + + TP_ARGS(tsk, avg, ontime_flag), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, cpu ) + __field( unsigned long, load_avg ) + __field( u64, load_sum ) + __field( int, ontime_flag ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->cpu = task_cpu(tsk); + __entry->load_avg = avg->load_avg; + __entry->load_sum = avg->load_sum; + __entry->ontime_flag = ontime_flag; + ), + TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu load_sum=%llu ontime_flag=%d", + __entry->comm, __entry->pid, __entry->cpu, __entry->load_avg, + (u64)__entry->load_sum, __entry->ontime_flag) +); + +TRACE_EVENT(ehmp_ontime_check_migrate, + + TP_PROTO(struct task_struct *tsk, int cpu, int migrate, char *label), + + TP_ARGS(tsk, cpu, migrate, label), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, cpu ) + __field( int, migrate ) + __array( char, label, 64 ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->cpu = 
cpu; + __entry->migrate = migrate; + strncpy(__entry->label, label, 64); + ), + + TP_printk("comm=%s pid=%d target_cpu=%d migrate=%d reason=%s", + __entry->comm, __entry->pid, __entry->cpu, + __entry->migrate, __entry->label) +); + +TRACE_EVENT(ehmp_ontime_task_wakeup, + + TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu, char *label), + + TP_ARGS(tsk, src_cpu, dst_cpu, label), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, src_cpu ) + __field( int, dst_cpu ) + __array( char, label, 64 ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->src_cpu = src_cpu; + __entry->dst_cpu = dst_cpu; + strncpy(__entry->label, label, 64); + ), + + TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d reason=%s", + __entry->comm, __entry->pid, __entry->src_cpu, + __entry->dst_cpu, __entry->label) +); + +TRACE_EVENT(ehmp_lbt_overutilized, + + TP_PROTO(int cpu, int level, unsigned long util, unsigned long capacity, bool overutilized), + + TP_ARGS(cpu, level, util, capacity, overutilized), + + TP_STRUCT__entry( + __field( int, cpu ) + __field( int, level ) + __field( unsigned long, util ) + __field( unsigned long, capacity ) + __field( bool, overutilized ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->level = level; + __entry->util = util; + __entry->capacity = capacity; + __entry->overutilized = overutilized; + ), + + TP_printk("cpu=%d level=%d util=%lu capacity=%lu overutilized=%d", + __entry->cpu, __entry->level, __entry->util, + __entry->capacity, __entry->overutilized) +); + +#endif /* _TRACE_EMS_H */ + +/* This part must be outside protection */ +#include diff --git a/init/Kconfig b/init/Kconfig index f22ccf30851a..7872c3630069 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -991,15 +991,15 @@ config SCHED_TUNE If unsure, say N. 
-config SCHED_EHMP
-	bool "Exynos scheduler for Heterogeneous Multi-Processor"
+config SCHED_EMS
+	bool "Exynos Mobile Scheduler"
 	depends on SMP
 	help
-	  This option supports Exynos scheduler for HMP architecture. It is
-	  designed to secure the limits of energy aware scheduler. This option
-	  provides features such as independent boosting functinos such as
-	  global boost and on-time migration, and prefer_perf and enhanced
-	  prefer_idle that work in conjunction with SCHEDTUNE.
+	  This option supports Exynos mobile scheduler. It is designed to
+	  secure the limits of energy aware scheduler. This option provides
+	  features such as independent boosting functions such as on-time migration,
+	  and prefer_perf and enhanced prefer_idle that work in conjunction with
+	  SCHEDTUNE.
 
 	  If unsure, say N.
 
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index f8b8b5152868..c4b9f40b147c 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -30,4 +30,4 @@ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
-obj-$(CONFIG_SCHED_EHMP) += ems/
+obj-$(CONFIG_SCHED_EMS) += ems/
diff --git a/kernel/sched/ems/Makefile b/kernel/sched/ems/Makefile
index f2b1f7be7905..5a981b8e53c4 100644
--- a/kernel/sched/ems/Makefile
+++ b/kernel/sched/ems/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_SCHED_EHMP) += ehmp.o
+obj-$(CONFIG_SCHED_EMS) += ehmp.o
 obj-$(CONFIG_FREQVAR_TUNE) += freqvar_tune.o
diff --git a/kernel/sched/ems/ehmp.c b/kernel/sched/ems/ehmp.c
index 01f64b0628a6..7368f61a28f6 100644
--- a/kernel/sched/ems/ehmp.c
+++ b/kernel/sched/ems/ehmp.c
@@ -8,11 +8,11 @@
 #include
 #include
 #include
-#include
+#include
 #include
 
 #define CREATE_TRACE_POINTS
-#include
+#include
 
 #include "../sched.h"
 #include "../tune.h"
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
new file mode 100644
index 000000000000..1ad0eb098216
--- /dev/null
+++ 
b/kernel/sched/ems/ems.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ec584ec4935c..98681cdc9878 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include @@ -796,7 +796,7 @@ void post_init_entity_util_avg(struct sched_entity *se) struct sched_avg *sa = &se->avg; long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2; - if (sched_feat(EXYNOS_HMP)) { + if (sched_feat(EXYNOS_MS)) { exynos_init_entity_util_avg(se); goto util_init_done; } @@ -4975,7 +4975,7 @@ static inline void update_overutilized_status(struct rq *rq) rcu_read_lock(); sd = rcu_dereference(rq->sd); if (sd && !sd_overutilized(sd)) { - if (sched_feat(EXYNOS_HMP)) + if (sched_feat(EXYNOS_MS)) overutilized = lbt_overutilized(rq->cpu, sd->level); else overutilized = cpu_overutilized(rq->cpu); @@ -5713,7 +5713,7 @@ static int group_idle_state(struct energy_env *eenv, int cpu_idx) * after moving, previous cpu/cluster can be powered down, * so it should be consider it when idle power was calculated. 
*/ - if (sched_feat(EXYNOS_HMP)) { + if (sched_feat(EXYNOS_MS)) { new_state = exynos_estimate_idle_state(cpu_idx, sched_group_span(sg), max_idle_state_idx, sg->group_weight); if (new_state) @@ -6287,7 +6287,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, unsigned long imbalance = scale_load_down(NICE_0_LOAD) * (sd->imbalance_pct-100) / 100; - if (sched_feat(EXYNOS_HMP)) { + if (sched_feat(EXYNOS_MS)) { idlest = exynos_fit_idlest_group(sd, p); if (idlest) return idlest; @@ -7337,7 +7337,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, eenv->max_cpu_count = EAS_CPU_BKP + 1; /* Find a cpu with sufficient capacity */ - if (sched_feat(EXYNOS_HMP)) { + if (sched_feat(EXYNOS_MS)) { eenv->cpu[EAS_CPU_NXT].cpu_id = exynos_select_cpu(p, &eenv->cpu[EAS_CPU_BKP].cpu_id, boosted, prefer_idle); @@ -9037,7 +9037,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, !sgs->group_misfit_task && rq_has_misfit(rq)) sgs->group_misfit_task = capacity_of(i); - if (sched_feat(EXYNOS_HMP)) { + if (sched_feat(EXYNOS_MS)) { if (lbt_overutilized(i, env->sd->level)) { *overutilized = true; @@ -9754,7 +9754,7 @@ static int need_active_balance(struct lb_env *env) return 1; } - if (sched_feat(EXYNOS_HMP)) + if (sched_feat(EXYNOS_MS)) return exynos_need_active_balance(env->idle, sd, env->src_cpu, env->dst_cpu); /* diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 524fdaee4932..738f5914a6b6 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -114,8 +114,4 @@ SCHED_FEAT(EAS_PREFER_IDLE, true) SCHED_FEAT(FIND_BEST_TARGET, true) SCHED_FEAT(FBT_STRICT_ORDER, true) -#ifdef CONFIG_SCHED_EHMP -SCHED_FEAT(EXYNOS_HMP, true) -#else -SCHED_FEAT(EXYNOS_HMP, false) -#endif +SCHED_FEAT(EXYNOS_MS, true) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0cab9df5f390..f41f94c00bc3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -841,7 +841,7 @@ struct rq { u64 cum_window_demand; #endif /* 
CONFIG_SCHED_WALT */ -#ifdef CONFIG_SCHED_EHMP +#ifdef CONFIG_SCHED_EMS bool ontime_migrating; #endif diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 94d5a7140bd3..e9f212d59578 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include @@ -435,7 +435,7 @@ int schedtune_prefer_idle(struct task_struct *p) return prefer_idle; } -#ifdef CONFIG_SCHED_EHMP +#ifdef CONFIG_SCHED_EMS static atomic_t kernel_prefer_perf_req[BOOSTGROUPS_COUNT]; int kernel_prefer_perf(int grp_idx) {