* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
-<<<<<<< HEAD
- struct kobj_attribute *attr, char *buf)
-=======
struct kobj_attribute *attr, char *buf)
->>>>>>> android-4.14-p
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
-<<<<<<< HEAD
- const char *buf, size_t count)
-=======
const char *buf, size_t count)
->>>>>>> android-4.14-p
{
int ret, enable;
#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
-<<<<<<< HEAD
-#include <linux/jiffies.h>
-=======
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/proc_fs.h>
->>>>>>> 818299f6bdae
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-<<<<<<< HEAD
-static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
-=======
#define UID_HASH_BITS 10
static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
struct rcu_head rcu;
u64 time_in_state[0];
};
->>>>>>> 818299f6bdae
/**
* struct cpu_freqs - per-cpu frequency information
static unsigned int next_offset;
-<<<<<<< HEAD
-void cpufreq_task_times_init(struct task_struct *p)
-{
- void *temp;
- unsigned long flags;
- unsigned int max_state;
-=======
-
/* Caller must hold rcu_read_lock() */
static struct uid_entry *find_uid_entry_rcu(uid_t uid)
{
void cpufreq_task_times_init(struct task_struct *p)
{
unsigned long flags;
->>>>>>> 818299f6bdae
spin_lock_irqsave(&task_time_in_state_lock, flags);
p->time_in_state = NULL;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
p->max_state = 0;
-<<<<<<< HEAD
-
- max_state = READ_ONCE(next_offset);
-=======
}
void cpufreq_task_times_alloc(struct task_struct *p)
void *temp;
unsigned long flags;
unsigned int max_state = READ_ONCE(next_offset);
->>>>>>> 818299f6bdae
/* We use one array to avoid multiple allocs per task */
temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
unsigned long flags;
void *temp;
-<<<<<<< HEAD
-=======
if (!p->time_in_state)
return;
->>>>>>> 818299f6bdae
spin_lock_irqsave(&task_time_in_state_lock, flags);
temp = p->time_in_state;
p->time_in_state = NULL;
{
unsigned long flags;
unsigned int state;
-<<<<<<< HEAD
- struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
-=======
struct uid_entry *uid_entry;
struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
->>>>>>> 818299f6bdae
if (!freqs || p->flags & PF_EXITING)
return;
p->time_in_state)
p->time_in_state[state] += cputime;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
-<<<<<<< HEAD
-=======
spin_lock_irqsave(&uid_lock, flags);
uid_entry = find_or_register_uid_locked(uid);
if (uid_entry && state < uid_entry->max_state)
uid_entry->time_in_state[state] += cputime;
spin_unlock_irqrestore(&uid_lock, flags);
->>>>>>> 818299f6bdae
}
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
all_freqs[cpu] = freqs;
}
-<<<<<<< HEAD
-=======
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
{
struct uid_entry *uid_entry;
spin_unlock_irqrestore(&uid_lock, flags);
}
->>>>>>> 818299f6bdae
void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
{
int index;
cpufreq_cpu_put(policy);
}
-<<<<<<< HEAD
-=======
static const struct seq_operations uid_time_in_state_seq_ops = {
.start = uid_seq_start,
}
early_initcall(cpufreq_times_init);
->>>>>>> 818299f6bdae
return count;
}
-static struct global_attr cpufreq_table =
+static struct kobj_attribute cpufreq_table =
__ATTR(cpufreq_table, S_IRUGO, show_cpufreq_table, NULL);
-static struct global_attr cpufreq_min_limit =
+static struct kobj_attribute cpufreq_min_limit =
__ATTR(cpufreq_min_limit, S_IRUGO | S_IWUSR,
show_cpufreq_min_limit, store_cpufreq_min_limit);
-static struct global_attr cpufreq_min_limit_wo_boost =
+static struct kobj_attribute cpufreq_min_limit_wo_boost =
__ATTR(cpufreq_min_limit_wo_boost, S_IRUGO | S_IWUSR,
show_cpufreq_min_limit, store_cpufreq_min_limit_wo_boost);
-static struct global_attr cpufreq_max_limit =
+static struct kobj_attribute cpufreq_max_limit =
__ATTR(cpufreq_max_limit, S_IRUGO | S_IWUSR,
show_cpufreq_max_limit, store_cpufreq_max_limit);
/* get sleep length of given cpu from tickless framework */
static s64 get_sleep_length(int cpu)
{
- return ktime_to_us(tick_nohz_get_sleep_length_cpu(cpu));
+ return ktime_to_us(ktime_sub(*(get_next_event_cpu(cpu)), ktime_get()));
}
static int cpus_busy(int target_residency, const struct cpumask *cpus)
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
-#include <linux/sched_energy.h>
+#include <linux/sched/energy.h>
#include <linux/slab.h>
#include <linux/string.h>
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
-<<<<<<< HEAD
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct kobj_attribute *b,
- const char *c, size_t count);
-};
-
-=======
->>>>>>> android-4.14-p
#define define_one_global_ro(_name) \
static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
unsigned long load_avg;
unsigned long util_avg;
struct util_est util_est;
-<<<<<<< HEAD
};
struct ontime_avg {
struct ontime_avg avg;
int migrating;
int cpu;
-=======
->>>>>>> android-4.14-p
};
struct sched_statistics {
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
-<<<<<<< HEAD
-#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
-
-=======
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
->>>>>>> android-4.14-p
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
-<<<<<<< HEAD
-TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
-TASK_PFA_SET(LMK_WAITING, lmk_waiting)
-=======
TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
->>>>>>> android-4.14-p
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
-<<<<<<< HEAD
-# include <linux/static_key.h>
-
-=======
->>>>>>> android-4.14-p
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
extern __read_mostly unsigned int sysctl_sched_time_avg;
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
-<<<<<<< HEAD
-extern ktime_t tick_nohz_get_sleep_length(void);
-extern ktime_t tick_nohz_get_sleep_length_cpu(int cpu);
-=======
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
->>>>>>> android-4.14-p
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
*delta_next = TICK_NSEC;
return *delta_next;
}
-static inline ktime_t tick_nohz_get_sleep_length_cpu(int cpu)
-{
- return NSEC_PER_SEC / HZ;
-}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
__tick_nohz_task_switch();
}
+extern ktime_t *get_next_event_cpu(unsigned int cpu);
+
#endif
);
#endif /* CONFIG_FAIR_GROUP_SCHED */
-/*
- * Tracepoint for tasks' estimated utilization.
- */
-TRACE_EVENT(sched_util_est_task,
-
- TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
-
- TP_ARGS(tsk, avg),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( unsigned int, util_avg )
- __field( unsigned int, est_enqueued )
- __field( unsigned int, est_ewma )
-
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->cpu = task_cpu(tsk);
- __entry->util_avg = avg->util_avg;
- __entry->est_enqueued = avg->util_est.enqueued;
- __entry->est_ewma = avg->util_est.ewma;
- ),
-
- TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
- __entry->comm,
- __entry->pid,
- __entry->cpu,
- __entry->util_avg,
- __entry->est_ewma,
- __entry->est_enqueued)
-);
-
-/*
- * Tracepoint for root cfs_rq's estimated utilization.
- */
-TRACE_EVENT(sched_util_est_cpu,
-
- TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
-
- TP_ARGS(cpu, cfs_rq),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned int, util_avg )
- __field( unsigned int, util_est_enqueued )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->util_avg = cfs_rq->avg.util_avg;
- __entry->util_est_enqueued = cfs_rq->avg.util_est.enqueued;
- ),
-
- TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
- __entry->cpu,
- __entry->util_avg,
- __entry->util_est_enqueued)
-);
-
/*
* Tracepoint for accounting CPU boosted utilization
*/
choice
prompt "Utilization's PELT half-Life"
-<<<<<<< HEAD
default PELT_UTIL_HALFLIFE_16
-=======
- default PELT_UTIL_HALFLIFE_32
->>>>>>> android-4.14-p
help
Allows choosing one of the possible values for the PELT half-life to
be used for the update of the utilization of tasks and CPUs.
build up to 50% utilization. The higher the half-life the longer it
takes for a task to be represented as a big one.
-<<<<<<< HEAD
- If not sure, use the deafult of 16 ms.
-
-config PELT_UTIL_HALFLIFE_32
- bool "32 ms, for server"
-
-config PELT_UTIL_HALFLIFE_16
- bool "16 ms, suggested for interactive workloads"
-
-config PELT_UTIL_HALFLIFE_8
- bool "8 ms, very fast"
-=======
If not sure, use the default of 32 ms.
config PELT_UTIL_HALFLIFE_32
Use 8ms as the PELT half-life value. This will make the ramp-up and
decay of utilization and load four times as fast as for the default
configuration using 32ms.
->>>>>>> android-4.14-p
endchoice
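(Illustrative note, not part of the Kconfig source: with an exponentially
decaying PELT signal, a task that runs continuously builds utilization
roughly as u(t) = 1024 * (1 - 2^(-t/HL)), i.e. about 50% of scale after one
half-life, 75% after two and 87.5% after three. The 16 ms default selected
by this tree therefore ramps up and decays twice as fast as the upstream
32 ms setting, and the 8 ms option four times as fast.)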
void cpus_write_unlock(void)
{
-<<<<<<< HEAD
cpu_hotplug.active_writer = NULL;
mutex_unlock(&cpu_hotplug.lock);
cpuhp_lock_release();
-=======
- /*
- * We can't have hotplug operations before userspace starts running,
- * and some init codepaths will knowingly not take the hotplug lock.
- * This is all valid, so mute lockdep until it makes sense to report
- * unheld locks.
- */
- if (system_state < SYSTEM_RUNNING)
- return;
-
- percpu_rwsem_assert_held(&cpu_hotplug_lock);
->>>>>>> android-4.14-p
}
/*
policy->governor_data = NULL;
sugov_tunables_free(tunables);
-<<<<<<< HEAD
free_sg_policy:
-=======
-stop_kthread:
- sugov_kthread_stop(sg_policy);
->>>>>>> android-4.14-p
mutex_unlock(&global_tunables_lock);
-
-free_sg_policy:
sugov_policy_free(sg_policy);
disable_fast_switch:
del_timer_sync(&sg_exynos->timer);
}
-static s64 get_next_event_time_ms(void)
+static s64 get_next_event_time_ms(int cpu)
{
- return ktime_to_us(tick_nohz_get_sleep_length());
+ return ktime_to_us(ktime_sub(*(get_next_event_cpu(cpu)), ktime_get()));
}
static int sugov_need_slack_timer(unsigned int cpu)
return 0;
if (sg_cpu->util > sg_exynos->min &&
- get_next_event_time_ms() > sg_exynos->expired_time)
+ get_next_event_time_ms(cpu) > sg_exynos->expired_time)
return 1;
return 0;
#include "ems.h"
#include "../sched.h"
+#include "../sched-pelt.h"
enum {
TYPE_BASE_CFS_RQ_UTIL = 0,
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/ems.h>
-#include <linux/sched_energy.h>
+#include <linux/sched/energy.h>
#include <trace/events/ems.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/ems.h>
-#include <linux/sched_energy.h>
+#include <linux/sched/energy.h>
#include <trace/events/ems.h>
#include "../sched.h"
#include "../tune.h"
#include "./ems.h"
+#include "../sched-pelt.h"
/****************************************************************/
/* On-time migration */
* CFS operations on generic schedulable entities:
*/
-#ifdef CONFIG_FAIR_GROUP_SCHED
+extern int cpu_util_wake(int cpu, struct task_struct *p);
-/* cpu runqueue to which this cfs_rq is attached */
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+#define entity_is_task(se) 1
+
+static inline struct task_struct *task_of(struct sched_entity *se)
{
- return cfs_rq->rq;
+ return container_of(se, struct task_struct, se);
}
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se) (!se->my_q)
+#ifdef CONFIG_FAIR_GROUP_SCHED
-static inline struct task_struct *task_of(struct sched_entity *se)
+/* cpu runqueue to which this cfs_rq is attached */
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
- SCHED_WARN_ON(!entity_is_task(se));
- return container_of(se, struct task_struct, se);
+ return cfs_rq->rq;
}
/* Walk up scheduling entities hierarchy */
#else /* !CONFIG_FAIR_GROUP_SCHED */
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
-
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
return container_of(cfs_rq, struct rq, cfs);
}
-#define entity_is_task(se) 1
-
#define for_each_sched_entity(se) \
for (; se; se = NULL)
return 1;
}
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
-static inline void cfs_se_util_change(struct sched_avg *avg)
-{
- unsigned int enqueued;
-
- if (!sched_feat(UTIL_EST))
- return;
-
- /* Avoid store if the flag has been already set */
- enqueued = avg->util_est.enqueued;
- if (!(enqueued & UTIL_AVG_UNCHANGED))
- return;
-
- /* Reset flag to report util_avg has been updated */
- enqueued &= ~UTIL_AVG_UNCHANGED;
- WRITE_ONCE(avg->util_est.enqueued, enqueued);
-}
-
static int
__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
{
__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (___update_load_avg(now, cpu, &se->avg,
-<<<<<<< HEAD
se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL, NULL)) {
if (schedtune_util_est_en(task_of(se)))
cfs_se_util_change(&se->avg);
-=======
- se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL, NULL)) {
- cfs_se_util_change(&se->avg);
-
#ifdef UTIL_EST_DEBUG
/*
* Trace utilization only for actual tasks.
trace_sched_util_est_cpu(cpu, cfs_rq);
}
#endif /* UTIL_EST_DEBUG */
->>>>>>> android-4.14-p
return 1;
}
static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
-<<<<<<< HEAD
-static inline unsigned long task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_task_util) {
- return (p->ravg.demand / (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
- }
-=======
static inline int task_fits_capacity(struct task_struct *p, long capacity);
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
return (p->ravg.demand /
(walt_ravg_window >> SCHED_CAPACITY_SHIFT));
->>>>>>> android-4.14-p
#endif
return READ_ONCE(p->se.avg.util_avg);
}
-<<<<<<< HEAD
-inline unsigned long _task_util_est(struct task_struct *p)
-{
- struct util_est ue = READ_ONCE(p->se.avg.util_est);
-
- return schedtune_util_est_en(p) ? max(ue.ewma, ue.enqueued)
- : task_util(p);
-}
-
-inline unsigned long task_util_est(struct task_struct *p)
-{
- return schedtune_util_est_en(p) ? max(READ_ONCE(p->se.avg.util_avg), _task_util_est(p))
- : task_util(p);
-=======
static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
return max(ue.ewma, ue.enqueued);
}
-static inline unsigned long task_util_est(struct task_struct *p)
+unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
(walt_ravg_window >> SCHED_CAPACITY_SHIFT));
#endif
return max(task_util(p), _task_util_est(p));
->>>>>>> android-4.14-p
}
static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
-<<<<<<< HEAD
- /* Update plots for Task and CPU estimated utilization */
-=======
->>>>>>> android-4.14-p
trace_sched_util_est_task(p, &p->se.avg);
trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}
/*
* Check if a (signed) value is within a specified (unsigned) margin,
* based on the observation that:
-<<<<<<< HEAD
-=======
*
->>>>>>> android-4.14-p
* abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
*
* NOTE: this only works when value + margin < INT_MAX.
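*
* A quick check of the identity (illustrative, not in the original
* comment): with x = -3 and y = 5, (unsigned)(x + y - 1) = 1 < 9,
* matching abs(-3) < 5; with x = 7 the sum is 11, which is not < 9,
* matching abs(7) >= 5; and with x = -10 the sum underflows to a huge
* unsigned value, so it is again correctly rejected.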
}
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
-<<<<<<< HEAD
/* Update plots for CPU's estimated utilization */
-=======
->>>>>>> android-4.14-p
trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
/*
if (!task_sleep)
return;
-<<<<<<< HEAD
if (!schedtune_util_est_en(p))
return;
-=======
->>>>>>> android-4.14-p
/*
* If the PELT values haven't changed since enqueue time,
* skip the util_est update.
*/
ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
last_ewma_diff = ue.enqueued - ue.ewma;
-<<<<<<< HEAD
if (within_margin(last_ewma_diff, capacity_orig_of(task_cpu(p)) / 100))
-=======
- if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
->>>>>>> android-4.14-p
return;
/*
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
WRITE_ONCE(p->se.avg.util_est, ue);
-<<<<<<< HEAD
/* Update plots for Task's estimated utilization */
-=======
->>>>>>> android-4.14-p
trace_sched_util_est_task(p, &p->se.avg);
}
return 0;
}
-<<<<<<< HEAD
-=======
static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
->>>>>>> android-4.14-p
static inline void
util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
*
* Return: the (estimated) utilization for the specified CPU
*/
-static inline unsigned long cpu_util(int cpu)
+unsigned long cpu_util(int cpu)
{
struct cfs_rq *cfs_rq;
unsigned int util;
#endif
}
-/*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
- */
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
-{
- struct cfs_rq *cfs_rq;
- unsigned int util;
-
-#ifdef CONFIG_SCHED_WALT
- /*
- * WALT does not decay idle tasks in the same manner
- * as PELT, so it makes little sense to subtract task
- * utilization from cpu utilization. Instead just use
- * cpu_util for this case.
- */
- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util))
- return cpu_util(cpu);
-#endif
-
- /* Task has no contribution or is new */
- if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return cpu_util(cpu);
-
- cfs_rq = &cpu_rq(cpu)->cfs;
- util = READ_ONCE(cfs_rq->avg.util_avg);
-
- /* Discount task's blocked util from CPU's util */
- util -= min_t(unsigned int, util, task_util(p));
-
- /*
- * Covered cases:
- *
- * a) if *p is the only task sleeping on this CPU, then:
- * cpu_util (== task_util) > util_est (== 0)
- * and thus we return:
- * cpu_util_wake = (cpu_util - task_util) = 0
- *
- * b) if other tasks are SLEEPING on this CPU, which is now exiting
- * IDLE, then:
- * cpu_util >= task_util
- * cpu_util > util_est (== 0)
- * and thus we discount *p's blocked utilization to return:
- * cpu_util_wake = (cpu_util - task_util) >= 0
- *
- * c) if other tasks are RUNNABLE on that CPU and
- * util_est > cpu_util
- * then we use util_est since it returns a more restrictive
- * estimation of the spare capacity on that CPU, by just
- * considering the expected utilization of tasks already
- * runnable on that CPU.
- *
- * Cases a) and b) are covered by the above code, while case c) is
- * covered by the following code when estimated utilization is
- * enabled.
- */
- if (sched_feat(UTIL_EST))
- util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
-
- /*
- * Utilization (estimated) can exceed the CPU capacity, thus let's
- * clamp to the maximum CPU capacity to ensure consistency with
- * the cpu_util call.
- */
- return min_t(unsigned long, util, capacity_orig_of(cpu));
-}
-
static unsigned long group_max_util(struct energy_env *eenv, int cpu_idx)
{
unsigned long max_util = 0;
return select_idle_sibling_cstate_aware(p, prev, target);
}
-<<<<<<< HEAD
-/*
- * cpu_util_wake: Compute cpu utilization with any contributions from
- * the waking task p removed.
- */
-static int cpu_util_wake(int cpu, struct task_struct *p)
-{
- struct cfs_rq *cfs_rq;
- unsigned int util;
-
-#ifdef CONFIG_SCHED_WALT
- /*
- * WALT does not decay idle tasks in the same manner
- * as PELT, so it makes little sense to subtract task
- * utilization from cpu utilization. Instead just use
- * cpu_util for this case.
- */
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
- return cpu_util(cpu);
-#endif
- /* Task has no contribution or is new */
- if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return cpu_util(cpu);
-
- cfs_rq = &cpu_rq(cpu)->cfs;
- util = READ_ONCE(cfs_rq->avg.util_avg);
-
- /* Discount task's blocked util from CPU's util */
- util -= min_t(unsigned int, util, task_util(p));
-
- /*
- * Covered cases:
- *
- * a) if *p is the only task sleeping on this CPU, then:
- * cpu_util (== task_util) > util_est (== 0)
- * and thus we return:
- * cpu_util_wake = (cpu_util - task_util) = 0
- *
- * b) if other tasks are SLEEPING on this CPU, which is now exiting
- * IDLE, then:
- * cpu_util >= task_util
- * cpu_util > util_est (== 0)
- * and thus we discount *p's blocked utilization to return:
- * cpu_util_wake = (cpu_util - task_util) >= 0
- *
- * c) if other tasks are RUNNABLE on that CPU and
- * util_est > cpu_util
- * then we use util_est since it returns a more restrictive
- * estimation of the spare capacity on that CPU, by just
- * considering the expected utilization of tasks already
- * runnable on that CPU.
- *
- * Cases a) and b) are covered by the above code, while case c) is
- * covered by the following code when estimated utilization is
- * enabled.
- */
- if (sched_feat(UTIL_EST))
- util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
-
- /*
- * Utilization (estimated) can exceed the CPU capacity, thus let's
- * clamp to the maximum CPU capacity to ensure consistency with
- * the cpu_util call.
- */
- return min_t(unsigned long, util, capacity_orig_of(cpu));
-}
-
-=======
->>>>>>> android-4.14-p
static inline int task_fits_capacity(struct task_struct *p, long capacity)
{
return capacity * 1024 > boosted_task_util(p) * capacity_margin;
if (lbt_overutilized(i, env->sd->level)) {
*overutilized = true;
-<<<<<<< HEAD
- if (rq_has_misfit(rq))
+ if (rq->misfit_task_load)
*misfit_task = true;
}
} else {
if (cpu_overutilized(i)) {
*overutilized = true;
- if (rq_has_misfit(rq))
+ if (rq->misfit_task_load)
*misfit_task = true;
}
-=======
- if (rq->misfit_task_load)
- *misfit_task = true;
->>>>>>> android-4.14-p
}
}
SCHED_FEAT(WA_WEIGHT, true)
SCHED_FEAT(WA_BIAS, true)
-/*
- * UtilEstimation. Use estimated CPU utilization.
- */
-SCHED_FEAT(UTIL_EST, true)
-
/*
* Energy aware scheduling. Use platform energy model to guide scheduling
* decisions optimizing for energy efficiency.
SCHED_FEAT(FIND_BEST_TARGET, true)
SCHED_FEAT(FBT_STRICT_ORDER, true)
-<<<<<<< HEAD
SCHED_FEAT(EXYNOS_MS, true)
-=======
/*
* Apply schedtune boost hold to tasks of all sched classes.
* If enabled, schedtune will hold the boost applied to a CPU
* RT class.
*/
SCHED_FEAT(SCHEDTUNE_BOOST_HOLD_ALL, false)
->>>>>>> android-4.14-p
#include <linux/slab.h>
#include <linux/irq_work.h>
-<<<<<<< HEAD
#include <linux/ems.h>
-=======
#include "tune.h"
->>>>>>> android-4.14-p
#include "walt.h"
+#include "sched-pelt.h"
#include <trace/events/sched.h>
#ifdef CONFIG_SCHED_USE_FLUID_RT
+#ifndef SCHED_PELT_H
+
+#define SCHED_PELT_H
+
/* SPDX-License-Identifier: GPL-2.0 */
/* Generated by Documentation/scheduler/sched-pelt; do not modify. */
-<<<<<<< HEAD
-=======
-
->>>>>>> android-4.14-p
#ifdef CONFIG_PELT_UTIL_HALFLIFE_32
static const u32 runnable_avg_yN_inv[] = {
0xffffffff,0xfa83b2da,0xf5257d14,0xefe4b99a,
#define LOAD_AVG_MAX 47742
#define LOAD_AVG_MAX_N 345
-<<<<<<< HEAD
static const u32 __accumulated_sum_N32[] = {
0, 23371, 35056, 40899, 43820, 45281,
46011, 46376, 46559, 46650, 46696, 46719,
};
-=======
->>>>>>> android-4.14-p
#endif
#ifdef CONFIG_PELT_UTIL_HALFLIFE_16
#define LOAD_AVG_MAX 24152
#define LOAD_AVG_MAX_N 517
-<<<<<<< HEAD
static const u32 __accumulated_sum_N32[] = {
0, 22731, 34096, 39779, 42620, 44041,
44751, 45106, 45284, 45373, 45417, 45439,
45461, 45461, 45461, 45461,
};
-=======
->>>>>>> android-4.14-p
#endif
#ifdef CONFIG_PELT_UTIL_HALFLIFE_8
#define LOAD_AVG_MAX 12337
#define LOAD_AVG_MAX_N 603
-<<<<<<< HEAD
static const u32 __accumulated_sum_N32[] = {
0, 16507, 24760, 28887, 30950, 31982,
32498, 32756, 32885, 32949, 32981, 32997,
#endif
-=======
#endif
->>>>>>> android-4.14-p
extern unsigned int walt_ravg_window;
extern bool walt_disabled;
-<<<<<<< HEAD
-#ifdef CONFIG_SCHED_WALT
-#define walt_util(util_var, demand_sum) {\
- u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
- do_div(sum, walt_ravg_window);\
- util_var = (typeof(util_var))sum;\
- }
-#endif
-
-/**
- * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
- * @cpu: the CPU to get the utilization of
- *
- * The unit of the return value must be the one of capacity so we can compare
- * the utilization with the capacity of the CPU that is available for CFS task
- * (ie cpu_capacity).
- *
- * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
- * recent utilization of currently non-runnable tasks on a CPU. It represents
- * the amount of utilization of a CPU in the range [0..capacity_orig] where
- * capacity_orig is the cpu_capacity available at the highest frequency
- * (arch_scale_freq_capacity()).
- * The utilization of a CPU converges towards a sum equal to or less than the
- * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
- * the running time on this CPU scaled by capacity_curr.
- *
- * The estimated utilization of a CPU is defined to be the maximum between its
- * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
- * currently RUNNABLE on that CPU.
- * This allows to properly represent the expected utilization of a CPU which
- * has just got a big task running since a long sleep period. At the same time
- * however it preserves the benefits of the "blocked utilization" in
- * describing the potential for other tasks waking up on the same CPU.
- *
- * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
- * higher than capacity_orig because of unfortunate rounding in
- * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
- * the average stabilizes with the new running time. We need to check that the
- * utilization stays within the range of [0..capacity_orig] and cap it if
- * necessary. Without utilization capping, a group could be seen as overloaded
- * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
- * available capacity. We allow utilization to overshoot capacity_curr (but not
- * capacity_orig) as it useful for predicting the capacity required after task
- * migrations (scheduler-driven DVFS).
- *
- * Return: the (estimated) utilization for the specified CPU
- */
-static inline unsigned long __cpu_util(int cpu, int delta)
-{
- struct cfs_rq *cfs_rq;
- unsigned int util;
+extern unsigned long cpu_util(int cpu);
- cfs_rq = &cpu_rq(cpu)->cfs;
- util = READ_ONCE(cfs_rq->avg.util_avg);
-
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- walt_util(util, cpu_rq(cpu)->cumulative_runnable_avg);
- }
#endif
- if (sched_feat(UTIL_EST))
- util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
-
- delta += util;
- if (delta < 0)
- return 0;
-
- return min_t(unsigned long, delta, capacity_orig_of(cpu));
-}
-
-static inline unsigned long cpu_util(int cpu)
-{
- return __cpu_util(cpu, 0);
-}
-
-static inline unsigned long cpu_util_freq(int cpu)
-{
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
- unsigned long capacity = capacity_orig_of(cpu);
- unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
-
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- walt_util(util, cpu_rq(cpu)->prev_runnable_sum);
- }
-#endif
- if (sched_feat(UTIL_EST)) {
- util = max_t(unsigned long, util,
- READ_ONCE(cfs_rq->avg.util_est.enqueued));
- }
-
- return (util >= capacity) ? capacity : util;
-}
-
-unsigned long _task_util_est(struct task_struct *p);
-unsigned long task_util_est(struct task_struct *p);
-
-#endif
-=======
-#endif /* CONFIG_SMP */
->>>>>>> android-4.14-p
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
return &per_cpu(tick_cpu_sched, cpu);
}
+ktime_t *get_next_event_cpu(unsigned int cpu)
+{
+ return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}
+
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
* The time, when the last jiffy update happened. Protected by jiffies_lock.
return ktime_sub(next_event, now);
}
-/**
- * tick_nohz_get_sleep_length_cpu - return the length of the current sleep
- * for a particular CPU.
- *
- * Called from power state control code with interrupts disabled
- */
-ktime_t tick_nohz_get_sleep_length_cpu(int cpu)
-{
- struct tick_sched *ts = tick_get_tick_sched(cpu);
-
- return ts->sleep_length;
-}
-
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
* for a particular CPU.