};
#ifdef CONFIG_SCHED_EHMP
+extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+						   struct task_struct *p);
extern void exynos_init_entity_util_avg(struct sched_entity *se);
extern int exynos_need_active_balance(enum cpu_idle_type idle,
		struct sched_domain *sd, int src_cpu, int dst_cpu);
extern void request_kernel_prefer_perf(int grp_idx, int enable);
#else
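+/* !CONFIG_SCHED_EHMP: stubs keep callers on the generic scheduler paths */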
+static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+		struct task_struct *p) { return NULL; }
static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
static inline int exynos_need_active_balance(enum cpu_idle_type idle,
		struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
#include "sched.h"
#include "tune.h"
+/**********************************************************************
+ * extern functions                                                   *
+ **********************************************************************/
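+/* fair.c internals used below; declared here as they are not in sched.h */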
+extern struct sched_entity *__pick_next_entity(struct sched_entity *se);
+extern unsigned long boosted_task_util(struct task_struct *task);
+extern unsigned long capacity_curr_of(int cpu);
+extern int find_best_target(struct task_struct *p, int *backup_cpu,
+			    bool boosted, bool prefer_idle);
+extern u64 decay_load(u64 val, u64 n);
+extern int start_cpu(bool boosted);
+
static unsigned long task_util(struct task_struct *p)
{
	return p->se.avg.util_avg;
}

static inline struct sched_entity *se_of(struct sched_avg *sa)
{
	return container_of(sa, struct sched_entity, avg);
}
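+
+/* A task fits a CPU when its boosted util is under ~82% (1024/1248) of capacity */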
+static inline int task_fits(struct task_struct *p, long capacity)
+{
+	return capacity * 1024 > boosted_task_util(p) * 1248;
+}
+
#define entity_is_cfs_rq(se) (se->my_q)
#define entity_is_task(se) (!se->my_q)
#define LOAD_AVG_MAX 47742
#define lb_sd_parent(sd) \
	(sd->parent && sd->parent->groups != sd->parent->groups->next)
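+/*
+ * Return the sched_group in @sd that contains the lowest-capacity CPU
+ * still able to hold @p's boosted utilization, or NULL if no group fits.
+ */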
+struct sched_group *
+exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
+{
+	struct sched_group *group = sd->groups;
+	struct sched_group *fit_group = NULL;
+	unsigned long fit_capacity = ULONG_MAX;
+
+	do {
+		int i;
+
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpumask_intersects(sched_group_span(group),
+					&p->cpus_allowed))
+			continue;
+
+		/* Remember the group with the smallest CPU capacity that fits */
+		for_each_cpu(i, sched_group_span(group)) {
+			if (capacity_of(i) < fit_capacity &&
+			    task_fits(p, capacity_of(i))) {
+				fit_capacity = capacity_of(i);
+				fit_group = group;
+			}
+		}
+	} while (group = group->next, group != sd->groups);
+
+	return fit_group;
+}
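+
+/*
+ * Sketch of the assumed call site in fair.c's find_idlest_group()
+ * (hypothetical, for illustration only):
+ *
+ *	group = exynos_fit_idlest_group(sd, p);
+ *	if (group)
+ *		return group;
+ */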
+
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
#define TASK_TRACK_COUNT 5
-extern struct sched_entity *__pick_next_entity(struct sched_entity *se);
static struct task_struct *
ontime_pick_heavy_task(struct sched_entity *se, struct cpumask *dst_cpus,
		       int *boost_migration)
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-extern u64 decay_load(u64 val, u64 n);
-
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3;
/**********************************************************************
 * cpu selection                                                      *
 **********************************************************************/
-extern unsigned long boosted_task_util(struct task_struct *task);
-extern unsigned long capacity_curr_of(int cpu);
-extern int find_best_target(struct task_struct *p, int *backup_cpu,
-			    bool boosted, bool prefer_idle);
-extern int start_cpu(bool boosted);
-
#define EAS_CPU_PRV 0
#define EAS_CPU_NXT 1
#define EAS_CPU_BKP 2