#define SCALE_SIZE 2
static ssize_t show_cpufreq_table(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct exynos_cpufreq_domain *domain;
ssize_t count = 0;
}
static ssize_t show_cpufreq_min_limit(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct exynos_cpufreq_domain *domain;
unsigned int pm_qos_min;
}
static ssize_t store_cpufreq_min_limit(struct kobject *kobj,
- struct attribute *attr, const char *buf,
+ struct kobj_attribute *attr, const char *buf,
size_t count)
{
struct exynos_cpufreq_domain *domain;
}
static ssize_t store_cpufreq_min_limit_wo_boost(struct kobject *kobj,
- struct attribute *attr, const char *buf,
+ struct kobj_attribute *attr, const char *buf,
size_t count)
{
struct exynos_cpufreq_domain *domain;
}
static ssize_t show_cpufreq_max_limit(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct exynos_cpufreq_domain *domain;
unsigned int pm_qos_max;
exynos_cpuhp_request("ACME", mask, 0);
}
-static ssize_t store_cpufreq_max_limit(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_cpufreq_max_limit(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct exynos_cpufreq_domain *domain;
return count;
}
-static struct global_attr cpufreq_table =
+static struct kobj_attribute cpufreq_table =
__ATTR(cpufreq_table, S_IRUGO, show_cpufreq_table, NULL);
-static struct global_attr cpufreq_min_limit =
+static struct kobj_attribute cpufreq_min_limit =
__ATTR(cpufreq_min_limit, S_IRUGO | S_IWUSR,
show_cpufreq_min_limit, store_cpufreq_min_limit);
-static struct global_attr cpufreq_min_limit_wo_boost =
+static struct kobj_attribute cpufreq_min_limit_wo_boost =
__ATTR(cpufreq_min_limit_wo_boost, S_IRUGO | S_IWUSR,
show_cpufreq_min_limit, store_cpufreq_min_limit_wo_boost);
-static struct global_attr cpufreq_max_limit =
+static struct kobj_attribute cpufreq_max_limit =
__ATTR(cpufreq_max_limit, S_IRUGO | S_IWUSR,
show_cpufreq_max_limit, store_cpufreq_max_limit);
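The handlers above now match the show/store prototypes expected by struct kobj_attribute, which is what lets __ATTR() wire them up directly. A minimal sketch of how such attributes are typically collected and exposed through sysfs; the group name, init function and the use of power_kobj (/sys/power) are illustrative assumptions, not taken from this patch:

#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Collect the kobj_attribute entries above via their embedded struct attribute. */
static struct attribute *exynos_cpufreq_attrs[] = {
	&cpufreq_table.attr,
	&cpufreq_min_limit.attr,
	&cpufreq_min_limit_wo_boost.attr,
	&cpufreq_max_limit.attr,
	NULL,
};

static const struct attribute_group exynos_cpufreq_attr_group = {
	.attrs = exynos_cpufreq_attrs,
};

static int __init exynos_cpufreq_sysfs_init(void)
{
	/* power_kobj is the kobject backing /sys/power */
	return sysfs_create_group(power_kobj, &exynos_cpufreq_attr_group);
}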
/* get sleep length of given cpu from tickless framework */
static s64 get_sleep_length(int cpu)
{
- return ktime_to_us(tick_nohz_get_sleep_length_cpu(cpu));
+ return ktime_to_us(ktime_sub(*(get_next_event_cpu(cpu)), ktime_get()));
}
static int cpus_busy(int target_residency, const struct cpumask *cpus)
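The body of cpus_busy() is cut off above. For orientation only, one plausible way get_sleep_length() could feed such a residency check; this body is an assumption, not taken from the patch, and it presumes both values are expressed in microseconds:

static int cpus_busy(int target_residency, const struct cpumask *cpus)
{
	int cpu;

	/* Treat the mask as busy if any CPU is expected to wake up
	 * before the target residency elapses. */
	for_each_cpu(cpu, cpus)
		if (get_sleep_length(cpu) < target_residency)
			return 1;

	return 0;
}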
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
-#include <linux/sched_energy.h>
+#include <linux/sched/energy.h>
#include <linux/slab.h>
#include <linux/string.h>
__tick_nohz_task_switch();
}
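+/* pointer to @cpu's next clock event expiry (its tick device's next_event) */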
+extern ktime_t *get_next_event_cpu(unsigned int cpu);
+
#endif
choice
prompt "Utilization's PELT half-Life"
- default PELT_UTIL_HALFLIFE_32
+ default PELT_UTIL_HALFLIFE_16
help
Allows choosing one of the possible values for the PELT half-life used
when updating the utilization of tasks and CPUs.
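For reference, the decay factor y applied per ~1 ms PELT period satisfies y^HL = 0.5 for a half-life HL, so halving HL from 32 to 16 makes utilization track workload changes roughly twice as fast, at the cost of a noisier signal. A standalone userspace sketch (not kernel code) comparing the two defaults:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* per-period decay factor y, where y^HL = 0.5 */
	printf("HL=32: y = %.5f\n", pow(0.5, 1.0 / 32.0));	/* ~0.97857 */
	printf("HL=16: y = %.5f\n", pow(0.5, 1.0 / 16.0));	/* ~0.95760 */
	return 0;
}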
del_timer_sync(&sg_exynos->timer);
}
-static s64 get_next_event_time_ms(void)
+static s64 get_next_event_time_ms(int cpu)
{
- return ktime_to_us(tick_nohz_get_sleep_length());
+ return ktime_to_us(ktime_sub(*(get_next_event_cpu(cpu)), ktime_get()));
}
static int sugov_need_slack_timer(unsigned int cpu)
return 0;
if (sg_cpu->util > sg_exynos->min &&
- get_next_event_time_ms() > sg_exynos->expired_time)
+ get_next_event_time_ms(cpu) > sg_exynos->expired_time)
return 1;
return 0;
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/ems.h>
-#include <linux/sched_energy.h>
+#include <linux/sched/energy.h>
#include <trace/events/ems.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/ems.h>
-#include <linux/sched_energy.h>
+#include <linux/sched/energy.h>
#include <trace/events/ems.h>
* CFS operations on generic schedulable entities:
*/
-#ifdef CONFIG_FAIR_GROUP_SCHED
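+/* CPU utilization with the waking task's contribution removed; now defined outside this file */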
+extern unsigned long cpu_util_wake(int cpu, struct task_struct *p);
-/* cpu runqueue to which this cfs_rq is attached */
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+#define entity_is_task(se) 1
+
+static inline struct task_struct *task_of(struct sched_entity *se)
{
- return cfs_rq->rq;
+ return container_of(se, struct task_struct, se);
}
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se) (!se->my_q)
+#ifdef CONFIG_FAIR_GROUP_SCHED
-static inline struct task_struct *task_of(struct sched_entity *se)
+/* cpu runqueue to which this cfs_rq is attached */
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
- SCHED_WARN_ON(!entity_is_task(se));
- return container_of(se, struct task_struct, se);
+ return cfs_rq->rq;
}
/* Walk up scheduling entities hierarchy */
#else /* !CONFIG_FAIR_GROUP_SCHED */
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
-
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
return container_of(cfs_rq, struct rq, cfs);
}
-#define entity_is_task(se) 1
-
#define for_each_sched_entity(se) \
for (; se; se = NULL)
*
* Return: the (estimated) utilization for the specified CPU
*/
-static inline unsigned long cpu_util(int cpu)
+unsigned long cpu_util(int cpu)
{
struct cfs_rq *cfs_rq;
unsigned int util;
#endif
}
-/*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
- */
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
-{
- struct cfs_rq *cfs_rq;
- unsigned int util;
-
-#ifdef CONFIG_SCHED_WALT
- /*
- * WALT does not decay idle tasks in the same manner
- * as PELT, so it makes little sense to subtract task
- * utilization from cpu utilization. Instead just use
- * cpu_util for this case.
- */
- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util))
- return cpu_util(cpu);
-#endif
-
- /* Task has no contribution or is new */
- if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return cpu_util(cpu);
-
- cfs_rq = &cpu_rq(cpu)->cfs;
- util = READ_ONCE(cfs_rq->avg.util_avg);
-
- /* Discount task's blocked util from CPU's util */
- util -= min_t(unsigned int, util, task_util(p));
-
- /*
- * Covered cases:
- *
- * a) if *p is the only task sleeping on this CPU, then:
- * cpu_util (== task_util) > util_est (== 0)
- * and thus we return:
- * cpu_util_wake = (cpu_util - task_util) = 0
- *
- * b) if other tasks are SLEEPING on this CPU, which is now exiting
- * IDLE, then:
- * cpu_util >= task_util
- * cpu_util > util_est (== 0)
- * and thus we discount *p's blocked utilization to return:
- * cpu_util_wake = (cpu_util - task_util) >= 0
- *
- * c) if other tasks are RUNNABLE on that CPU and
- * util_est > cpu_util
- * then we use util_est since it returns a more restrictive
- * estimation of the spare capacity on that CPU, by just
- * considering the expected utilization of tasks already
- * runnable on that CPU.
- *
- * Cases a) and b) are covered by the above code, while case c) is
- * covered by the following code when estimated utilization is
- * enabled.
- */
- if (sched_feat(UTIL_EST))
- util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
-
- /*
- * Utilization (estimated) can exceed the CPU capacity, thus let's
- * clamp to the maximum CPU capacity to ensure consistency with
- * the cpu_util call.
- */
- return min_t(unsigned long, util, capacity_orig_of(cpu));
-}
-
static unsigned long group_max_util(struct energy_env *eenv, int cpu_idx)
{
unsigned long max_util = 0;
extern unsigned int walt_ravg_window;
extern bool walt_disabled;
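+/* implemented in kernel/sched/fair.c; no longer static inline there */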
+extern unsigned long cpu_util(int cpu);
+
#endif /* CONFIG_SMP */
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
return &per_cpu(tick_cpu_sched, cpu);
}
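+/*
+ * Return a pointer to @cpu's clock event device expiry (next_event).
+ * Callers assume the tick device's event device has been registered.
+ */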
+ktime_t *get_next_event_cpu(unsigned int cpu)
+{
+ return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}
+
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
* The time, when the last jiffy update happened. Protected by jiffies_lock.