return (util >= capacity) ? capacity : util;
}
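+/**********************************************************************
+ * load balance *
+ **********************************************************************/
+/*
+ * A task fits on a CPU when its utilization stays below ~82% of the
+ * CPU's capacity (capacity * 1024/1248); the 1248/1024 ratio keeps
+ * roughly 22% of headroom free.
+ */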
+static inline int task_fits(struct task_struct *p, long capacity)
+{
+ return capacity * 1024 > task_util(p) * 1248;
+}
+
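+/*
+ * Iterate over the domain's groups and pick the one containing the
+ * lowest-capacity CPU that the task still fits on; return NULL when
+ * no allowed CPU fits. Preferring the smallest fitting capacity
+ * presumably favors the most power-efficient placement.
+ */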
+struct sched_group *
+exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
+{
+ struct sched_group *group = sd->groups;
+ struct sched_group *fit_group = NULL;
+ unsigned long fit_capacity = ULONG_MAX;
+
+ do {
+ int i;
+
+ /* Skip over this group if it has no CPUs allowed */
+ if (!cpumask_intersects(sched_group_span(group),
+ &p->cpus_allowed))
+ continue;
+
+ for_each_cpu(i, sched_group_span(group)) {
+ if (capacity_of(i) < fit_capacity &&
+ task_fits(p, capacity_of(i))) {
+ fit_capacity = capacity_of(i);
+ fit_group = group;
+ }
+ }
+ } while (group = group->next, group != sd->groups);
+
+ return fit_group;
+}
+
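+/*
+ * Returns true when the rq's current capacity is noticeably reduced
+ * compared to its original capacity, e.g. because RT/IRQ activity is
+ * consuming CPU time; "noticeably" is set by sd->imbalance_pct.
+ */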
+static inline int
+check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
+{
+ return ((rq->cpu_capacity * sd->imbalance_pct) <
+ (rq->cpu_capacity_orig * 100));
+}
+
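+/* True if @sd has a parent domain that spans more than one group */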
+#define lb_sd_parent(sd) \
+ (sd->parent && sd->parent->groups != sd->parent->groups->next)
+
+int exynos_need_active_balance(enum cpu_idle_type idle, struct sched_domain *sd,
+ int src_cpu, int dst_cpu)
+{
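+ /*
+ * Below the top level, require dst_cpu's capacity to beat
+ * src_cpu's by imbalance_pct/100; at the top level (no parent
+ * with multiple groups), any capacity difference qualifies.
+ */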
+ unsigned int src_imb_pct = lb_sd_parent(sd) ? sd->imbalance_pct : 1;
+ unsigned int dst_imb_pct = lb_sd_parent(sd) ? 100 : 1;
+ unsigned long src_cap = capacity_of(src_cpu);
+ unsigned long dst_cap = capacity_of(dst_cpu);
+ int level = sd->level;
+
+ /* dst_cpu is idle */
+ if ((idle != CPU_NOT_IDLE) &&
+ (cpu_rq(src_cpu)->cfs.h_nr_running == 1)) {
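+ /*
+ * src_cpu's capacity is noticeably reduced and dst_cpu is
+ * sufficiently bigger: actively move the lone running task.
+ */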
+ if ((check_cpu_capacity(cpu_rq(src_cpu), sd)) &&
+ (src_cap * sd->imbalance_pct < dst_cap * 100)) {
+ return 1;
+ }
+
+ /* This domain is the top level and dst_cpu is bigger than src_cpu */
+ if (!lb_sd_parent(sd) && src_cap < dst_cap)
+ if (lbt_overutilized(src_cpu, level) || global_boosted())
+ return 1;
+ }
+
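+ /*
+ * A sufficiently bigger, non-overutilized dst_cpu can relieve
+ * an overutilized src_cpu that is running a single task.
+ */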
+ if ((src_cap * src_imb_pct < dst_cap * dst_imb_pct) &&
+ cpu_rq(src_cpu)->cfs.h_nr_running == 1 &&
+ lbt_overutilized(src_cpu, level) &&
+ !lbt_overutilized(dst_cpu, level)) {
+ return 1;
+ }
+
+ return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
+}
+
static int select_proper_cpu(struct task_struct *p)
{
return -1;
}
-/**********************************************************************
- * load balance *
- **********************************************************************/
-#define lb_sd_parent(sd) \
- (sd->parent && sd->parent->groups != sd->parent->groups->next)
-
-struct sched_group *
-exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
-{
- struct sched_group *group = sd->groups;
- struct sched_group *fit_group = NULL;
- unsigned long fit_capacity = ULONG_MAX;
-
- do {
- int i;
-
- /* Skip over this group if it has no CPUs allowed */
- if (!cpumask_intersects(sched_group_span(group),
- &p->cpus_allowed))
- continue;
-
- for_each_cpu(i, sched_group_span(group)) {
- if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
- fit_capacity = capacity_of(i);
- fit_group = group;
- }
- }
- } while (group = group->next, group != sd->groups);
-
- return fit_group;
-}
-
-static inline int
-check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
-{
- return ((rq->cpu_capacity * sd->imbalance_pct) <
- (rq->cpu_capacity_orig * 100));
-}
-
-int exynos_need_active_balance(enum cpu_idle_type idle, struct sched_domain *sd,
- int src_cpu, int dst_cpu)
-{
- unsigned int src_imb_pct = lb_sd_parent(sd) ? sd->imbalance_pct : 1;
- unsigned int dst_imb_pct = lb_sd_parent(sd) ? 100 : 1;
- unsigned long src_cap = capacity_of(src_cpu);
- unsigned long dst_cap = capacity_of(dst_cpu);
- int level = sd->level;
-
- /* dst_cpu is idle */
- if ((idle != CPU_NOT_IDLE) &&
- (cpu_rq(src_cpu)->cfs.h_nr_running == 1)) {
- if ((check_cpu_capacity(cpu_rq(src_cpu), sd)) &&
- (src_cap * sd->imbalance_pct < dst_cap * 100)) {
- return 1;
- }
-
- /* This domain is top and dst_cpu is bigger than src_cpu*/
- if (!lb_sd_parent(sd) && src_cap < dst_cap)
- if (lbt_overutilized(src_cpu, level) || global_boosted())
- return 1;
- }
-
- if ((src_cap * src_imb_pct < dst_cap * dst_imb_pct) &&
- cpu_rq(src_cpu)->cfs.h_nr_running == 1 &&
- lbt_overutilized(src_cpu, level) &&
- !lbt_overutilized(dst_cpu, level)) {
- return 1;
- }
-
- return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
-}
-
/**********************************************************************
* Global boost *
**********************************************************************/