unsigned int capacity_margin = 1280; /* ~20% margin */
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
static inline bool __task_fits(struct task_struct *p, int cpu, int util)
{
unsigned long capacity = capacity_of(cpu);
- util += task_util(p);
+ util += boosted_task_util(p);
return (capacity * 1024) > (util * capacity_margin);
}
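
With capacity_margin = 1280, the test above reduces to requiring the (now boosted) utilization to stay below capacity * 1024/1280, i.e. to leave roughly 20% of the CPU's capacity as headroom. A minimal user-space sketch of the same arithmetic; the helper name and the example values are illustrative, not part of the patch:

    #include <stdio.h>
    #include <stdbool.h>

    #define CAPACITY_MARGIN	1280	/* same value as capacity_margin above */

    /* Same comparison as __task_fits(): both arguments are on the
     * kernel's 0..1024 fixed-point capacity scale. */
    static bool fits(unsigned long capacity, unsigned long util)
    {
        return (capacity * 1024) > (util * CAPACITY_MARGIN);
    }

    int main(void)
    {
        /* a 600-capacity CPU fits at most ~480 units of boosted utilization */
        printf("util 470 on capacity 600 -> %d\n", fits(600, 470)); /* 1: fits      */
        printf("util 500 on capacity 600 -> %d\n", fits(600, 500)); /* 0: too close */
        return 0;
    }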
return schedtune_margin(util, boost);
}
+static inline unsigned long
+schedtune_task_margin(struct task_struct *task)
+{
+ unsigned int boost;
+ unsigned long util;
+ unsigned long margin;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ boost = schedtune_task_boost(task);
+#else
+ boost = get_sysctl_sched_cfs_boost();
+#endif
+ if (boost == 0)
+ return 0;
+
+ util = task_util(task);
+ margin = schedtune_margin(util, boost);
+
+ return margin;
+}
+
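
schedtune_task_margin() reuses schedtune_margin(), which elsewhere in this series computes a Signal Proportional Compensation margin: roughly boost% of the headroom above the current utilization. Assuming that definition, a simplified user-space sketch of the numbers involved (SCHED_LOAD_SCALE and the exact fixed-point rounding are simplified here):

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 1024UL

    /* Assumed SPC margin: boost% of the unused headroom above util */
    static unsigned long spc_margin(unsigned long util, unsigned int boost)
    {
        return (SCHED_LOAD_SCALE - util) * boost / 100;
    }

    int main(void)
    {
        /* util 200 with a 10% boost gains a margin of ~82 */
        printf("margin = %lu\n", spc_margin(200, 10));
        return 0;
    }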
#else /* CONFIG_SCHED_TUNE */
static inline unsigned int
return 0;
}
+static inline unsigned long
+schedtune_task_margin(struct task_struct *task)
+{
+ return 0;
+}
+
#endif /* CONFIG_SCHED_TUNE */
static inline unsigned long
return util + margin;
}
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+ unsigned long util = task_util(task);
+ unsigned long margin = schedtune_task_margin(task);
+
+ return util + margin;
+}
+
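
boosted_task_util() is then simply the raw task utilization plus that margin. With the assumed SPC margin above, the boosted value can never exceed SCHED_LOAD_SCALE, and a 100% boost makes any task look as if it needs a whole CPU. A sketch of the composition, arithmetic only, no kernel types:

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 1024UL

    /* Assumed SPC margin, as in the previous sketch */
    static unsigned long spc_margin(unsigned long util, unsigned int boost)
    {
        return (SCHED_LOAD_SCALE - util) * boost / 100;
    }

    static unsigned long boosted_util(unsigned long util, unsigned int boost)
    {
        /* What boosted_task_util() computes: util + margin */
        return util + spc_margin(util, boost);
    }

    int main(void)
    {
        printf("%lu\n", boosted_util(200, 0));   /* 200:  no boost, raw util          */
        printf("%lu\n", boosted_util(200, 50));  /* 612:  half the headroom added     */
        printf("%lu\n", boosted_util(200, 100)); /* 1024: task looks like a full CPU  */
        return 0;
    }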
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
* so prev_cpu will receive a negative bias due to the double
* accounting. However, the blocked utilization may be zero.
*/
- int new_util = cpu_util(i) + task_util(p);
+ int new_util = cpu_util(i) + boosted_task_util(p);
if (new_util > capacity_orig_of(i))
continue;
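
The effect of the hunk above is to filter wake-up candidates by whether their original capacity can absorb the CPU's current utilization plus the boosted task. A toy model with illustrative capacities (600 for a little CPU, 1024 for a big one); the values are not from the patch:

    #include <stdio.h>
    #include <stdbool.h>

    /* Same filter as the hunk above: a CPU stays a candidate only if its
     * original capacity can hold its current util plus the boosted task. */
    static bool cpu_can_take(unsigned long cpu_util_now,
                             unsigned long boosted_util,
                             unsigned long capacity_orig)
    {
        return cpu_util_now + boosted_util <= capacity_orig;
    }

    int main(void)
    {
        printf("little: %d\n", cpu_can_take(300, 612, 600));  /* 0: skipped  */
        printf("big:    %d\n", cpu_can_take(300, 612, 1024)); /* 1: eligible */
        return 0;
    }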
#ifdef CONFIG_CGROUP_SCHEDTUNE
int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);
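
schedtune_task_boost() itself is implemented by the SchedTune cgroup controller, not shown in this hunk; conceptually it returns the boost percentage of the schedtune group the task is attached to, with the kernel doing the lookup under RCU. A user-space model of that relationship, with illustrative struct and field names only:

    #include <stdio.h>

    /* Illustrative model: the real lookup walks the task's schedtune
     * cgroup under rcu_read_lock(). */
    struct schedtune_group { int boost; };
    struct task { const char *comm; struct schedtune_group *st; };

    static int task_boost(const struct task *t)
    {
        return t->st->boost;
    }

    int main(void)
    {
        struct schedtune_group top_app = { .boost = 10 };
        struct task t = { .comm = "ui-thread", .st = &top_app };

        printf("%s boost = %d%%\n", t.comm, task_boost(&t));
        return 0;
    }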