};
#ifdef CONFIG_SCHED_EMS
+/* task util initialization */
extern void exynos_init_entity_util_avg(struct sched_entity *se);
+
+/* active balance */
extern int exynos_need_active_balance(enum cpu_idle_type idle,
- struct sched_domain *sd, int src_cpu, int dst_cpu);
+ struct sched_domain *sd, int src_cpu, int dst_cpu);
+/* wakeup balance */
extern int
exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int sync);
+/* ontime migration */
extern void ontime_migration(void);
extern int ontime_can_migration(struct task_struct *p, int cpu);
-extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
- struct sched_avg *sa);
-extern void ontime_new_entity_load(struct task_struct *parent,
- struct sched_entity *se);
+extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, struct sched_avg *sa);
+extern void ontime_new_entity_load(struct task_struct *parent, struct sched_entity *se);
extern void ontime_trace_task_info(struct task_struct *p);
+/* load balance trigger */
extern bool lbt_overutilized(int cpu, int level);
extern void update_lbt_overutil(int cpu, unsigned long capacity);
+/* global boost */
extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
+/* prefer perf */
extern void request_kernel_prefer_perf(int grp_idx, int enable);
/* task band */
extern int band_playing(struct task_struct *p, int cpu);
#else
static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
+
static inline int exynos_need_active_balance(enum cpu_idle_type idle,
- struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
+ struct sched_domain *sd, int src_cpu, int dst_cpu)
+{
+ return 0;
+}
static inline int
exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int sync)
{
	return -1;
}
static inline void ontime_migration(void) { }
-static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; }
-static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
- struct sched_avg *sa) { }
-static inline void ontime_new_entity_load(struct task_struct *p,
- struct sched_entity *se) { }
+static inline int ontime_can_migration(struct task_struct *p, int cpu)
+{
+ return 1;
+}
+static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, struct sched_avg *sa) { }
+static inline void ontime_new_entity_load(struct task_struct *p, struct sched_entity *se) { }
static inline void ontime_trace_task_info(struct task_struct *p) { }
-static inline bool lbt_overutilized(int cpu, int level) { return false; }
+static inline bool lbt_overutilized(int cpu, int level)
+{
+ return false;
+}
static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }
static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }