};
#ifdef CONFIG_SCHED_EMS
-extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
- struct task_struct *p);
extern void exynos_init_entity_util_avg(struct sched_entity *se);
extern int exynos_need_active_balance(enum cpu_idle_type idle,
struct sched_domain *sd, int src_cpu, int dst_cpu);
extern void request_kernel_prefer_perf(int grp_idx, int enable);
#else
-static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
- struct task_struct *p) { return NULL; }
static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
static inline int exynos_need_active_balance(enum cpu_idle_type idle,
struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
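Note: the #else stubs above let the scheduler core call the EMS hooks unconditionally; with CONFIG_SCHED_EMS disabled they compile down to no-ops. A minimal sketch of that pattern from a caller's point of view (the caller shown is hypothetical and not part of this patch):

/*
 * Hypothetical caller, for illustration only. It builds the same way with
 * or without CONFIG_SCHED_EMS, because the #else branch above supplies an
 * empty inline stub for the hook.
 */
static void example_init_new_entity(struct sched_entity *se)
{
	exynos_init_entity_util_avg(se);	/* real EMS hook or no-op stub */
}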
return (util >= capacity) ? capacity : util;
}
-static inline int task_fits(struct task_struct *p, long capacity)
-{
- return capacity * 1024 > task_util(p) * 1248;
-}
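For reference, the fitness test being removed accepts a CPU only while the task's utilization stays below roughly 82% of that CPU's capacity (1024/1248 ≈ 0.82), i.e. it keeps about 1.22x headroom. A small worked sketch of the same check (the helper name is illustrative, not from this patch):

/*
 * capacity * 1024 > util * 1248  is equivalent to  util < capacity * 1024/1248,
 * i.e. util must stay under ~82% of capacity.
 * Worked example: capacity = 430, util = 360:
 *   430 * 1024 = 440320 and 360 * 1248 = 449280, so the task does not fit.
 */
static inline bool fits_with_margin(unsigned long util, unsigned long capacity)
{
	return capacity * 1024 > util * 1248;
}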
-
-struct sched_group *
-exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
-{
- struct sched_group *group = sd->groups;
- struct sched_group *fit_group = NULL;
- unsigned long fit_capacity = ULONG_MAX;
-
- do {
- int i;
-
- /* Skip over this group if it has no CPUs allowed */
- if (!cpumask_intersects(sched_group_span(group),
- &p->cpus_allowed))
- continue;
-
- for_each_cpu(i, sched_group_span(group)) {
- if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
- fit_capacity = capacity_of(i);
- fit_group = group;
- }
- }
- } while (group = group->next, group != sd->groups);
-
- return fit_group;
-}
-
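The while() condition in the removed loop uses the comma operator: group = group->next advances the cursor first, then group != sd->groups ends the walk after one full pass around the circular group list, which is also why the continue taken for disallowed groups cannot spin forever. A minimal standalone sketch of the same traversal idiom (types and names are illustrative):

/*
 * sd->groups points into a ring of sched_group nodes linked via ->next.
 * This sketch visits every node exactly once and stops when the cursor
 * wraps back to the starting node.
 */
struct ring_node { struct ring_node *next; };

static void walk_ring_once(struct ring_node *start)
{
	struct ring_node *node = start;

	do {
		/* per-node work; a "continue" here still advances the cursor */
	} while (node = node->next, node != start);
}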
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
- if (sched_feat(EXYNOS_MS)) {
- idlest = exynos_fit_idlest_group(sd, p);
- if (idlest)
- return idlest;
- }
-
if (sd_flag & SD_BALANCE_WAKE)
load_idx = sd->wake_idx;
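The removed call site was gated on the EXYNOS_MS scheduler feature. In mainline terms, such flags are declared in kernel/sched/features.h with the SCHED_FEAT() macro and tested with sched_feat(); with CONFIG_SCHED_DEBUG they can also be toggled at runtime through /sys/kernel/debug/sched_features. Assuming the vendor tree declared the flag in the usual way (the declaration itself is not part of this diff), the pattern looks like:

/* kernel/sched/features.h (assumed declaration, not shown in this patch): */
SCHED_FEAT(EXYNOS_MS, true)

/* Call sites then guard the vendor-specific path: */
	if (sched_feat(EXYNOS_MS)) {
		/* e.g. the fit-group lookup removed by this patch */
	}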