const char *name;
struct cpumask cpus;
struct cpumask boost_cpus;
+ unsigned int ldsum_thr;
unsigned int cal_id;
unsigned int max_freq;
unsigned int change_latency;
struct list_head domains;
struct list_head modes;
- struct emc_mode *cur_mode; /* current mode */
- struct emc_mode *req_mode; /* requested mode */
- struct emc_mode *user_mode; /* user requesting mode */
+ struct emc_mode *cur_mode; /* current mode */
+ struct emc_mode *req_mode; /* requested mode */
+ struct emc_mode *user_mode; /* user requesting mode */
struct cpumask heavy_cpus; /* cpus need to boost */
struct cpumask busy_cpus; /* cpus need to online */
+ /* load sum (ldsum) of the boostable and trigger domains */
+ unsigned int ldsum;
/* member for mode change */
struct task_struct *task;
/* update domain's cpus status whether busy or idle */
static int emc_update_domain_status(struct emc_domain *domain)
{
- struct cpumask online_cpus, imbal_heavy_cpus;
struct cpumask heavy_cpus, busy_cpus, idle_cpus;
int cpu;
- cpumask_clear(&online_cpus);
- cpumask_clear(&imbal_heavy_cpus);
cpumask_clear(&heavy_cpus);
cpumask_clear(&busy_cpus);
cpumask_clear(&idle_cpus);
+ emc.ldsum = 0;
/*
* An offline core is treated like an idle core
* IDLE_CPU : util_avg < domain->cpu_idle_thr
* BUSY_CPU : domain->cpu_idle_thr <= util_avg < domain->cpu_heavy_thr
* HEAVY_CPU : util_avg >= domain->cpu_heavy_thr
- * IMBAL_HEAVY_CPU : If there is heavy_cpu and idle_cpu in a sams domain,
- * heavy_cpu is treated as imbalance_heavy_cpu
*/
- cpumask_and(&online_cpus, &domain->cpus, cpu_online_mask);
- for_each_cpu(cpu, &online_cpus) {
+ for_each_cpu_and(cpu, &domain->cpus, cpu_online_mask) {
struct rq *rq = cpu_rq(cpu);
struct sched_avg *sa = &rq->cfs.avg;
if (sa->util_avg >= domain->cpu_heavy_thr) {
cpumask_set_cpu(cpu, &heavy_cpus);
cpumask_set_cpu(cpu, &busy_cpus);
- } else if (sa->util_avg >= domain->cpu_idle_thr)
+ emc.ldsum += sa->util_avg;
+ } else if (sa->util_avg >= domain->cpu_idle_thr) {
cpumask_set_cpu(cpu, &busy_cpus);
- else
+ emc.ldsum += sa->util_avg;
+ } else
cpumask_set_cpu(cpu, &idle_cpus);
-
}
- /*
- * if domains has only one cpu or heavy_cpus with idle_cpus,
- * heavy_cpus is treated as imbalance_heavy_ cpus
- */
- if (cpumask_weight(&idle_cpus) || cpumask_weight(&online_cpus) == 1)
- cpumask_copy(&imbal_heavy_cpus, &heavy_cpus);
-
/* update the system-wide cpus masks with this domain's status */
- cpumask_or(&emc.heavy_cpus, &emc.heavy_cpus, &imbal_heavy_cpus);
+ cpumask_or(&emc.heavy_cpus, &emc.heavy_cpus, &heavy_cpus);
cpumask_or(&emc.busy_cpus, &emc.busy_cpus, &busy_cpus);
- trace_emc_domain_status(domain->name, *(unsigned int *)cpumask_bits(&emc.heavy_cpus),
+ trace_emc_domain_status(domain->name,
+ *(unsigned int *)cpumask_bits(&emc.heavy_cpus),
*(unsigned int *)cpumask_bits(&emc.busy_cpus),
- *(unsigned int *)cpumask_bits(&imbal_heavy_cpus),
*(unsigned int *)cpumask_bits(&heavy_cpus),
*(unsigned int *)cpumask_bits(&busy_cpus));
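/*
 * Not part of the patch: a minimal sketch of the per-cpu classification the
 * loop above performs, with hypothetical thresholds (cpu_idle_thr = 64,
 * cpu_heavy_thr = 512). Only busy and heavy cpus contribute to emc.ldsum;
 * idle (and offline) cpus do not. classify_cpu() and the EMC_*_CPU names
 * are illustrative, not driver symbols.
 */
enum { EMC_IDLE_CPU, EMC_BUSY_CPU, EMC_HEAVY_CPU };

static inline int classify_cpu(unsigned long util_avg,
			       unsigned int idle_thr, unsigned int heavy_thr)
{
	if (util_avg >= heavy_thr)
		return EMC_HEAVY_CPU;	/* boost candidate, counted in ldsum */
	if (util_avg >= idle_thr)
		return EMC_BUSY_CPU;	/* must stay online, counted in ldsum */
	return EMC_IDLE_CPU;		/* ignored for ldsum */
}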
if (cpumask_empty(&mask))
continue;
- if (!emc_domain_busy(domain)) {
+ if (domain->busy_ratio && !emc_domain_busy(domain)) {
trace_emc_domain_busy(domain->name, domain->load, false);
return false;
}
static struct emc_mode* emc_select_mode(void)
{
struct emc_mode *mode, *target_mode = NULL;
- int need_online_cnt, need_boost_cnt;
+ int need_online_cnt;
/* if there is no boostable cpu, we don't need boosting */
if (!emc_has_boostable_cpu())
return emc_get_base_mode();
/*
- * need_boost_cnt: number of cpus that need boosting
* need_online_cnt: number of cpus that need online
*/
- need_boost_cnt = cpumask_weight(&emc.heavy_cpus);
need_online_cnt = cpumask_weight(&emc.busy_cpus);
/* In reverse order to find the most boostable mode */
list_for_each_entry_reverse(mode, &emc.modes, list) {
if (!mode->enabled)
continue;
- if (need_boost_cnt > cpumask_weight(&mode->boost_cpus))
- continue;
- if (need_online_cnt > cpumask_weight(&mode->cpus))
+ /* if ldsum_thr is 0, the ldsum check is disabled for this mode */
+ if (mode->ldsum_thr && emc.ldsum >= mode->ldsum_thr) {
+ target_mode = mode;
+ break;
+ }
+ if (need_online_cnt > cpumask_weight(&mode->boost_cpus))
continue;
target_mode = mode;
break;
}
+
if (!target_mode)
target_mode = emc_get_base_mode();
- trace_emc_select_mode(target_mode->name, need_boost_cnt, need_online_cnt);
+ trace_emc_select_mode(target_mode->name, need_online_cnt);
return target_mode;
}
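/*
 * Not part of the patch: the load-sum shortcut in the selection loop above,
 * written as a hypothetical predicate. A mode with ldsum_thr == 0 never
 * takes the shortcut; a mode with a non-zero threshold is selected as soon
 * as the accumulated load of the boostable/trigger domains reaches it,
 * without waiting for enough busy cpus. emc_mode_ldsum_hit() is
 * illustrative only and does not exist in the driver.
 */
static inline bool emc_mode_ldsum_hit(const struct emc_mode *mode,
				      unsigned int ldsum)
{
	return mode->ldsum_thr && ldsum >= mode->ldsum_thr;
}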
/* return latest adaptive mode */
static struct emc_mode* emc_get_mode(bool updated)
{
- /* if user set user_mode, always return user mode */
- if (emc.user_mode)
- return emc.user_mode;
-
- /* if current system is not boostable, always uses base_mode */
- if (!emc.boostable)
- return emc_get_base_mode();
-
/*
* if system is busy overall, return base mode.
* because system is busy, maybe base mode will be
if (!raw_spin_trylock_irqsave(&emc_load_lock, flags))
return;
+ /* if user set user_mode, always return user mode */
+ if (unlikely(emc.user_mode)) {
+ target_mode = emc.user_mode;
+ goto skip_load_check;
+ }
+
+ /* if current system is not boostable, always uses base_mode */
+ if (!emc.boostable) {
+ target_mode = emc_get_base_mode();
+ goto skip_load_check;
+ }
+
/* update sched_load */
emc_update_load();
/* get mode */
target_mode = emc_get_mode(updated);
+skip_load_check:
/* request mode */
if (emc.req_mode != target_mode)
emc_request_mode_change(target_mode);
emc_mode_store(max_freq, max_freq);
emc_mode_store(change_latency, change_latency);
+emc_mode_store(ldsum_thr, ldsum_thr);
emc_mode_store(mode_enabled, enabled);
emc_mode_show(max_freq, max_freq);
emc_mode_show(change_latency, change_latency);
+emc_mode_show(ldsum_thr, ldsum_thr);
emc_mode_show(mode_enabled, enabled);
static int emc_set_enable(bool enable);
emc_attr_ro(mode_name);
emc_attr_rw(max_freq);
emc_attr_rw(change_latency);
+emc_attr_rw(ldsum_thr);
emc_attr_rw(mode_enabled);
static struct attribute *emc_attrs[] = {
&mode_name.attr,
&max_freq.attr,
&change_latency.attr,
+ &ldsum_thr.attr,
&mode_enabled.attr,
NULL
};
scnprintf(buf, sizeof(buf), "%*pbl",
cpumask_pr_args(&mode->boost_cpus));
pr_info("mode%d boost_cpus: %s\n", i, buf);
+ pr_info("mode%d ldsum_thr: %u\n", mode->ldsum_thr);
pr_info("mode%d cal-id: %u\n", i, mode->cal_id);
pr_info("mode%d max_freq: %u\n", i, mode->max_freq);
pr_info("mode%d change_latency: %u\n", i, mode->change_latency);
if(of_property_read_u32(dn, "change_latency", &mode->change_latency))
goto free;
+ if(of_property_read_u32(dn, "ldsum_thr", &mode->ldsum_thr))
+ mode->ldsum_thr = 0;
+
if (of_property_read_u32(dn, "enabled", &mode->enabled))
goto free;
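/*
 * Not part of the patch: a hypothetical mode node showing how the new
 * optional property could look in the device tree. Only "change_latency",
 * "ldsum_thr" and "enabled" appear in the parsing code above; the node name
 * and the remaining properties/values are placeholders.
 *
 *	mode@2 {
 *		...
 *		change_latency = <100>;
 *		ldsum_thr = <1500>;	// omit the property to disable (defaults to 0)
 *		enabled = <1>;
 *	};
 */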
TRACE_EVENT(emc_domain_status,
TP_PROTO(const char *domain, unsigned int sys_heavy, unsigned int sys_busy,
- unsigned int dom_imbal_heavy, unsigned int dom_heavy, unsigned int dom_busy),
+ unsigned int dom_heavy, unsigned int dom_busy),
- TP_ARGS(domain, sys_heavy, sys_busy, dom_imbal_heavy, dom_heavy, dom_busy),
+ TP_ARGS(domain, sys_heavy, sys_busy, dom_heavy, dom_busy),
TP_STRUCT__entry(
__string(domain, domain)
__field(unsigned int, sys_heavy)
__field(unsigned int, sys_busy)
- __field(unsigned int, dom_imbal_heavy)
__field(unsigned int, dom_heavy)
__field(unsigned int, dom_busy)
),
__assign_str(domain, domain);
__entry->sys_heavy = sys_heavy;
__entry->sys_busy = sys_busy;
- __entry->dom_imbal_heavy = dom_imbal_heavy;
__entry->dom_heavy = dom_heavy;
__entry->dom_busy = dom_busy;
),
- TP_printk("domain:%s s_heavy =%x, s_busy=%x, d_imbal_heavy=%x d_heavy=%x, d_busy=%x",
+ TP_printk("domain:%s s_heavy =%x, s_busy=%x, d_heavy=%x, d_busy=%x",
__get_str(domain), __entry->sys_heavy, __entry->sys_busy,
- __entry->dom_imbal_heavy, __entry->dom_heavy, __entry->dom_busy)
+ __entry->dom_heavy, __entry->dom_busy)
);
TRACE_EVENT(emc_update_system_status,
TRACE_EVENT(emc_select_mode,
- TP_PROTO(const char *mode, int need_boost_cnt, int need_online_cnt),
+ TP_PROTO(const char *mode, int need_online_cnt),
- TP_ARGS(mode, need_boost_cnt, need_online_cnt),
+ TP_ARGS(mode, need_online_cnt),
TP_STRUCT__entry(
__string(mode, mode)
- __field(int, need_boost_cnt)
__field(int, need_online_cnt)
),
TP_fast_assign(
__assign_str(mode, mode);
- __entry->need_boost_cnt = need_boost_cnt;
__entry->need_online_cnt = need_online_cnt;
),
- TP_printk("mode:%s need_boost_cnt=%d, need_online_cnt=%d",
- __get_str(mode), __entry->need_boost_cnt, __entry->need_online_cnt)
+ TP_printk("mode:%s need_online_cnt=%d",
+ __get_str(mode), __entry->need_online_cnt)
);
TRACE_EVENT(emc_domain_busy,