sched: ems: remove group balancer
author     Park Bumgyu <bumgyu.park@samsung.com>
Wed, 18 Jul 2018 00:59:34 +0000 (09:59 +0900)
committer  Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:56 +0000 (20:24 +0300)
The group balancer is no longer used; equivalent functionality is provided
by task band. Remove the group balancer.

Change-Id: I5ff7573a927f69144f477e5d64c32084d70188c7
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
kernel/sched/ems/core.c
kernel/sched/ems/ems.h
kernel/sched/ems/st_addon.c
kernel/sched/fair.c
kernel/sched/tune.c
kernel/sched/tune.h
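
For context on what is being dropped, the decision logic lived in
check_need_group_balance(), removed from kernel/sched/tune.c in the diff
below: a group is flagged for balancing once its summed utilization crosses
a threshold and a single task's share of that sum stays above a ratio for
several consecutive update intervals. The following is a condensed,
standalone C sketch of that heuristic only (kernel locking, tracing and the
cgroup plumbing are stripped; field and function names follow the removed
code):

/*
 * Condensed sketch of the removed group-balancer decision logic.
 * Mirrors the deleted check_need_group_balance(); not the full
 * implementation (no locking, tracing or cgroup interface).
 */
#include <stdbool.h>

struct group_balancer {
	unsigned long util;               /* sum of task utilization in group */
	unsigned long threshold;          /* group balancing threshold */
	unsigned int  imbalance_ratio;    /* %: heaviest-task share that flags imbalance */
	unsigned int  balance_ratio;      /* %: heaviest-task share that clears it */
	unsigned long heaviest_util;      /* heaviest task utilization in group */
	unsigned int  imbalance_duration; /* update intervals before flagging */
	unsigned int  imbalance_count;
	bool          need_balance;
};

void check_need_group_balance(struct group_balancer *gb)
{
	unsigned int heaviest_ratio;

	if (!gb->util) {
		/* empty group: nothing to balance */
		gb->imbalance_count = 0;
		gb->need_balance = false;
		return;
	}

	/* share of the group's utilization owned by its heaviest task */
	heaviest_ratio = gb->heaviest_util * 100 / gb->util;

	if (gb->need_balance) {
		/* already balancing: clear once the group cools down */
		if (gb->util < gb->threshold ||
		    heaviest_ratio < gb->balance_ratio) {
			gb->imbalance_count = 0;
			gb->need_balance = false;
		}
		return;
	}

	/* flag only after the imbalance persists for imbalance_duration updates */
	if (gb->util >= gb->threshold && heaviest_ratio > gb->imbalance_ratio) {
		if (++gb->imbalance_count >= gb->imbalance_duration)
			gb->need_balance = true;
	} else {
		gb->imbalance_count = 0;
	}
}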

diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c
index b06f5d8f25fbc5157d6a64d3e170c4f320888531..82ae08d432ba8dea9c22d8a82d74b2159f8414ce 100644
@@ -274,16 +274,7 @@ int exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int
        }
 
        /*
-        * Priority 5 : group balancing
-        */
-       target_cpu = group_balancing(p);
-       if (cpu_selected(target_cpu)) {
-               strcpy(state, "group balancing");
-               goto out;
-       }
-
-       /*
-        * Priority 6 : prefer-idle
+        * Priority 5 : prefer-idle
         *
         * Prefer-idle is a function that operates on cgroup basis managed by
        * schedtune. When prefer-idle is set to 1, the tasks in the group are
@@ -299,7 +290,7 @@ int exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int
        }
 
        /*
-        * Priority 7 : energy cpu
+        * Priority 6 : energy cpu
         *
         * A scheduling scheme based on cpu energy, find the least power consumption
         * cpu referring energy table when assigning task.
@@ -311,7 +302,7 @@ int exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int
        }
 
        /*
-        * Priority 8 : proper cpu
+        * Priority 7 : proper cpu
         */
        target_cpu = select_proper_cpu(p, prev_cpu);
        if (cpu_selected(target_cpu))
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
index 5853ca86af3a0a0f69eab3d8420e2a79739d10a4..fa9ee36b71a8a24e99d46ccc47d7ca89d3a297c8 100644
@@ -32,7 +32,6 @@ extern int group_balancing(struct task_struct *p);
 #else
 static inline int prefer_perf_cpu(struct task_struct *p) { return -1; }
 static inline int prefer_idle_cpu(struct task_struct *p) { return -1; }
-static inline int group_balancing(struct task_struct *p) { return -1; }
 #endif
 
 extern unsigned long task_util(struct task_struct *p);
diff --git a/kernel/sched/ems/st_addon.c b/kernel/sched/ems/st_addon.c
index 97944e50e47e061244f32509328c9bacc8710a0a..ae1b130955d58885260494e106b1c8a886216318 100644
@@ -150,14 +150,6 @@ int prefer_idle_cpu(struct task_struct *p)
        return select_idle_cpu(p);
 }
 
-/**********************************************************************
- *                           Group balancer                           *
- **********************************************************************/
-int group_balancing(struct task_struct *p)
-{
-       return -1;
-}
-
 /**********************************************************************
  *                          Sysfs interface                           *
  **********************************************************************/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b904acecd5a67fc75ec99a2dfa746e6bdeab888..bf43144a56a5b4472b5b62182adb7f2ebad595f8 100644
@@ -11376,7 +11376,6 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
 #endif
 
        ontime_migration();
-       schedtune_group_util_update();
 }
 
 /*
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index d004279000943bd3633396990f8e23d8bac3920e..95ee812faf5cbff8a2c809ce16608eeb95697df4 100644
@@ -18,45 +18,6 @@ extern struct reciprocal_value schedtune_spc_rdiv;
 /* We hold schedtune boost in effect for at least this long */
 #define SCHEDTUNE_BOOST_HOLD_NS 50000000ULL
 
-struct group_balancer {
-       /* sum of task utilization in group */
-       unsigned long util;
-
-       /* group balancing threshold */
-       unsigned long threshold;
-
-       /* imbalance ratio by heaviest task */
-       unsigned int imbalance_ratio;
-
-       /* balance ratio by heaviest task */
-       unsigned int balance_ratio;
-
-       /* heaviest task utilization in group */
-       unsigned long heaviest_util;
-
-       /* group utilization update interval */
-       unsigned long update_interval;
-
-       /* next group utilization update time */
-       unsigned long next_update_time;
-
-       /*
-        * group imbalance time = imbalance_count * update_interval
-        * imbalance_count >= imbalance_duration -> need balance
-        */
-       unsigned int imbalance_duration;
-       unsigned int imbalance_count;
-
-       /* utilization tracking window size */
-       unsigned long window;
-
-       /* group balancer locking */
-       raw_spinlock_t lock;
-
-       /* need group balancing? */
-       bool need_balance;
-};
-
 /*
  * EAS scheduler tunables for task groups.
  */
@@ -80,9 +41,6 @@ struct schedtune {
         * towards high performance CPUs */
        int prefer_perf;
 
-       /* SchedTune group balancer */
-       struct group_balancer gb;
-
        /* SchedTune util-est */
        int util_est_en;
 
@@ -564,296 +522,6 @@ int schedtune_prefer_perf(struct task_struct *p)
        return prefer_perf;
 }
 
-int schedtune_need_group_balance(struct task_struct *p)
-{
-       bool balance;
-
-       if (unlikely(!schedtune_initialized))
-               return 0;
-
-       rcu_read_lock();
-       balance = task_schedtune(p)->gb.need_balance;
-       rcu_read_unlock();
-
-       return balance;
-}
-
-static inline void
-check_need_group_balance(int group_idx, struct group_balancer *gb)
-{
-       int heaviest_ratio;
-
-       if (!gb->util) {
-               gb->imbalance_count = 0;
-               gb->need_balance = false;
-
-               goto out;
-       }
-
-       heaviest_ratio = gb->heaviest_util * 100 / gb->util;
-
-       if (gb->need_balance) {
-               if (gb->util < gb->threshold || heaviest_ratio < gb->balance_ratio) {
-                       gb->imbalance_count = 0;
-                       gb->need_balance = false;
-               }
-
-               goto out;
-       }
-
-       if (gb->util >= gb->threshold && heaviest_ratio > gb->imbalance_ratio) {
-               gb->imbalance_count++;
-
-               if (gb->imbalance_count >= gb->imbalance_duration)
-                       gb->need_balance = true;
-       } else {
-               gb->imbalance_count = 0;
-       }
-
-out:
-       trace_sched_tune_check_group_balance(group_idx,
-                               gb->imbalance_count, gb->need_balance);
-}
-
-static void __schedtune_group_util_update(struct schedtune *st)
-{
-       struct group_balancer *gb = &st->gb;
-       unsigned long now = cpu_rq(0)->clock_task;
-       struct css_task_iter it;
-       struct task_struct *p;
-       struct task_struct *heaviest_p = NULL;
-       unsigned long util_sum = 0;
-       unsigned long heaviest_util = 0;
-       unsigned int total = 0, accumulated = 0;
-
-       if (!raw_spin_trylock(&gb->lock))
-               return;
-
-       if (!gb->update_interval)
-               goto out;
-
-       if (time_before(now, gb->next_update_time))
-               goto out;
-
-       css_task_iter_start(&st->css, 0, &it);
-       while ((p = css_task_iter_next(&it))) {
-               unsigned long clock_task, delta, util;
-
-               total++;
-
-               clock_task = task_rq(p)->clock_task;
-               delta = clock_task - p->se.avg.last_update_time;
-               if (p->se.avg.last_update_time && delta > gb->window)
-                       continue;
-
-               util = p->se.avg.util_avg;
-               if (util > heaviest_util) {
-                       heaviest_util = util;
-                       heaviest_p = p;
-               }
-
-               util_sum += p->se.avg.util_avg;
-               accumulated++;
-       }
-       css_task_iter_end(&it);
-
-       gb->util = util_sum;
-       gb->heaviest_util = heaviest_util;
-       gb->next_update_time = now + gb->update_interval;
-
-       /* if there is no task in group, heaviest_p is always NULL */
-       if (heaviest_p)
-               trace_sched_tune_grouputil_update(st->idx, total, accumulated,
-                               gb->util, heaviest_p, gb->heaviest_util);
-
-       check_need_group_balance(st->idx, gb);
-out:
-       raw_spin_unlock(&gb->lock);
-}
-
-void schedtune_group_util_update(void)
-{
-       int idx;
-
-       if (unlikely(!schedtune_initialized))
-               return;
-
-       rcu_read_lock();
-
-       for (idx = 1; idx < BOOSTGROUPS_COUNT; idx++) {
-               struct schedtune *st = allocated_group[idx];
-
-               if (!st)
-                       continue;
-               __schedtune_group_util_update(st);
-       }
-
-       rcu_read_unlock();
-}
-
-static u64
-gb_util_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.util;
-}
-
-static u64
-gb_heaviest_ratio_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       if (!st->gb.util)
-               return 0;
-
-       return st->gb.heaviest_util * 100 / st->gb.util;
-}
-
-static u64
-gb_threshold_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.threshold;
-}
-
-static int
-gb_threshold_write(struct cgroup_subsys_state *css, struct cftype *cft,
-           u64 threshold)
-{
-       struct schedtune *st = css_st(css);
-       struct group_balancer *gb = &st->gb;
-
-       raw_spin_lock(&gb->lock);
-       gb->threshold = threshold;
-       check_need_group_balance(st->idx, gb);
-       raw_spin_unlock(&gb->lock);
-
-       return 0;
-}
-
-static u64
-gb_imbalance_ratio_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.imbalance_ratio;
-}
-
-static int
-gb_imbalance_ratio_write(struct cgroup_subsys_state *css, struct cftype *cft,
-           u64 ratio)
-{
-       struct schedtune *st = css_st(css);
-       struct group_balancer *gb = &st->gb;
-
-       ratio = min_t(u64, ratio, 100);
-
-       raw_spin_lock(&gb->lock);
-       gb->imbalance_ratio = ratio;
-       check_need_group_balance(st->idx, gb);
-       raw_spin_unlock(&gb->lock);
-
-       return 0;
-}
-
-static u64
-gb_balance_ratio_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.balance_ratio;
-}
-
-static int
-gb_balance_ratio_write(struct cgroup_subsys_state *css, struct cftype *cft,
-           u64 ratio)
-{
-       struct schedtune *st = css_st(css);
-       struct group_balancer *gb = &st->gb;
-
-       ratio = min_t(u64, ratio, 100);
-
-       raw_spin_lock(&gb->lock);
-       gb->balance_ratio = ratio;
-       check_need_group_balance(st->idx, gb);
-       raw_spin_unlock(&gb->lock);
-
-       return 0;
-}
-
-static u64
-gb_interval_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.update_interval / NSEC_PER_USEC;
-}
-
-static int
-gb_interval_write(struct cgroup_subsys_state *css, struct cftype *cft,
-           u64 interval_us)
-{
-       struct schedtune *st = css_st(css);
-       struct group_balancer *gb = &st->gb;
-
-       raw_spin_lock(&gb->lock);
-       gb->update_interval = interval_us * NSEC_PER_USEC;
-       if (!interval_us) {
-               gb->util = 0;
-               gb->need_balance = false;
-       }
-       raw_spin_unlock(&gb->lock);
-
-       return 0;
-}
-
-static u64
-gb_duration_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.imbalance_duration;
-}
-
-static int
-gb_duration_write(struct cgroup_subsys_state *css, struct cftype *cft,
-           u64 duration)
-{
-       struct schedtune *st = css_st(css);
-       struct group_balancer *gb = &st->gb;
-
-       raw_spin_lock(&gb->lock);
-       gb->imbalance_duration = duration;
-       check_need_group_balance(st->idx, gb);
-       raw_spin_unlock(&gb->lock);
-
-       return 0;
-}
-
-static u64
-gb_window_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct schedtune *st = css_st(css);
-
-       return st->gb.window / NSEC_PER_MSEC;
-}
-
-static int
-gb_window_write(struct cgroup_subsys_state *css, struct cftype *cft,
-           u64 window)
-{
-       struct schedtune *st = css_st(css);
-       struct group_balancer *gb = &st->gb;
-
-       raw_spin_lock(&gb->lock);
-       gb->window = window * NSEC_PER_MSEC;
-       raw_spin_unlock(&gb->lock);
-
-       return 0;
-}
-
 static u64
 util_est_en_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
@@ -972,44 +640,6 @@ static struct cftype files[] = {
                .read_u64 = band_read,
                .write_u64 = band_write,
        },
-       {
-               .name = "gb_util",
-               .read_u64 = gb_util_read,
-       },
-       {
-               .name = "gb_heaviest_ratio",
-               .read_u64 = gb_heaviest_ratio_read,
-       },
-       {
-               .name = "gb_threshold",
-               .read_u64 = gb_threshold_read,
-               .write_u64 = gb_threshold_write,
-       },
-       {
-               .name = "gb_imbalance_ratio",
-               .read_u64 = gb_imbalance_ratio_read,
-               .write_u64 = gb_imbalance_ratio_write,
-       },
-       {
-               .name = "gb_balance_ratio",
-               .read_u64 = gb_balance_ratio_read,
-               .write_u64 = gb_balance_ratio_write,
-       },
-       {
-               .name = "gb_interval_us",
-               .read_u64 = gb_interval_read,
-               .write_u64 = gb_interval_write,
-       },
-       {
-               .name = "gb_duration",
-               .read_u64 = gb_duration_read,
-               .write_u64 = gb_duration_write,
-       },
-       {
-               .name = "gb_window_ms",
-               .read_u64 = gb_window_read,
-               .write_u64 = gb_window_write,
-       },
        {
                .name = "util_est_en",
                .read_u64 = util_est_en_read,
@@ -1038,22 +668,6 @@ schedtune_boostgroup_init(struct schedtune *st)
        return 0;
 }
 
-static void
-schedtune_group_balancer_init(struct schedtune *st)
-{
-       raw_spin_lock_init(&st->gb.lock);
-
-       st->gb.threshold = ULONG_MAX;
-       st->gb.imbalance_ratio = 0;                             /* 0% */
-       st->gb.update_interval = 0;                             /* disable update */
-       st->gb.next_update_time = cpu_rq(0)->clock_task;
-
-       st->gb.imbalance_duration = 0;
-       st->gb.imbalance_count = 0;
-
-       st->gb.window = 100 * NSEC_PER_MSEC;            /* 100ms */
-}
-
 static struct cgroup_subsys_state *
 schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -1083,8 +697,6 @@ schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
        if (!st)
                goto out;
 
-       schedtune_group_balancer_init(st);
-
        /* Initialize per CPUs boost group support */
        st->idx = idx;
        if (schedtune_boostgroup_init(st))
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 135b1a49ba2bd83abf82fc466a39217e8ab08ecf..5b35686491882a7717aa213c115f7b4569167f17 100644
@@ -15,9 +15,6 @@ struct target_nrg {
 int schedtune_cpu_boost(int cpu);
 int schedtune_task_boost(struct task_struct *tsk);
 
-void schedtune_group_util_update(void);
-int schedtune_need_group_balance(struct task_struct *p);
-
 int schedtune_prefer_idle(struct task_struct *tsk);
 int schedtune_prefer_perf(struct task_struct *tsk);
 int schedtune_util_est_en(struct task_struct *tsk);
@@ -30,9 +27,6 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu);
 #define schedtune_cpu_boost(cpu)  0
 #define schedtune_task_boost(tsk) 0
 
-#define schedtune_group_util_update() do { } while (0)
-#define schedtune_need_group_balance(task) 0
-
 #define schedtune_prefer_idle(tsk) 0
 #define schedtune_prefer_perf(tsk) 0
 #define schedtune_util_est_en(tsk) 0