rt tasks are currently not eligible for schedtune boosting. Make it so
by adding enqueue/dequeue hooks.
For rt tasks, schedtune only acts as a frequency boosting framework, it
has no impact on placement decisions and the prefer_idle attribute is
not used.
Also prepare schedutil to use the boosted util for rt task boosting.
With this change, schedtune accounting will include rt class tasks,
however boosting currently only applies to the utilization provided by
fair class tasks. Sum up the tracked CPU utilization applying boost to
the aggregate util instead - this includes RT task util in the boosting
if any tasks are runnable.
Scenario 1, considering one CPU:
1x rt task running, util 250, boost 0
1x cfs task runnable, util 250, boost 50
previous util=250+(50_pct_boosted_250) = 887
new util=50_pct_boosted_500 = 762
Scenario 2, considering one CPU:
1x rt task running, util 250, boost 50
1x cfs task runnable, util 250, boost 0
previous util=250+250 = 500
new util=50_pct_boosted_500 = 762
Scenario 3, considering one CPU:
1x rt task running, util 250, boost 50
1x cfs task runnable, util 250, boost 50
previous util=250+(50_pct_boosted_250) = 887
new util=50_pct_boosted_500 = 762
Scenario 4:
1x rt task running, util 250, boost 50
previous util=250 = 250
new util=50_pct_boosted_250 = 637
Change-Id: Ie287cbd0692468525095b5024db9faac8b2f4878
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
#include "sched.h"
-unsigned long boosted_cpu_util(int cpu);
+unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
#define SUGOV_KTHREAD_PRIORITY 50
rt = sched_get_rt_rq_util(cpu);
- *util = boosted_cpu_util(cpu) + rt;
+ *util = boosted_cpu_util(cpu, rt);
*util = min(*util, max_cap);
*max = max_cap;
}
rcu_read_unlock();
}
-unsigned long boosted_cpu_util(int cpu);
+unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
#else
#define update_overutilized_status(rq) do {} while (0)
-#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
+#define boosted_cpu_util(cpu, other_util) cpu_util_freq(cpu)
#endif /* CONFIG_SMP */
#endif /* CONFIG_SCHED_TUNE */
unsigned long
-boosted_cpu_util(int cpu)
+boosted_cpu_util(int cpu, unsigned long other_util)
{
- unsigned long util = cpu_util_freq(cpu);
+ unsigned long util = cpu_util_freq(cpu) + other_util;
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
#include <linux/slab.h>
#include <linux/irq_work.h>
+#include "tune.h"
#include "walt.h"
{
struct sched_rt_entity *rt_se = &p->rt;
+ schedtune_enqueue_task(p, cpu_of(rq));
+
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
{
struct sched_rt_entity *rt_se = &p->rt;
+ schedtune_dequeue_task(p, cpu_of(rq));
+
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
walt_dec_cumulative_runnable_avg(rq, p);