update_group_capacity(sd, cpu);
}
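+/*
+ * Energy efficiency of capacity state n: capacity scaled by
+ * SCHED_CAPACITY_SHIFT to preserve integer precision, divided by the
+ * power consumed in that state.
+ */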
+#define energy_eff(e, n) \
+ ((e->cap_states[n].cap << SCHED_CAPACITY_SHIFT) / e->cap_states[n].power)
+
+static void init_sched_groups_energy(int cpu, struct sched_domain *sd,
+ sched_domain_energy_f fn)
+{
+ struct sched_group *sg = sd->groups;
+ const struct sched_group_energy *sge;
+ int i;
+
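+ /* Nothing to do if this topology level provides no energy data. */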
+ if (!(fn && fn(cpu)))
+ return;
+
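+ /* Only the group balance cpu initializes the shared group energy data. */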
+ if (cpu != group_balance_cpu(sg))
+ return;
+
+ if (sd->flags & SD_OVERLAP) {
+ pr_err("BUG: EAS does not support overlapping sd spans\n");
+#ifdef CONFIG_SCHED_DEBUG
+ pr_err(" the %s domain has SD_OVERLAP set\n", sd->name);
+#endif
+ return;
+ }
+
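+ /* Energy data is attached bottom-up: the child domain must have it first. */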
+ if (sd->child && !sd->child->groups->sge) {
+ pr_err("BUG: EAS setup borken for CPU%d\n", cpu);
+#ifdef CONFIG_SCHED_DEBUG
+ pr_err(" energy data on %s but not on %s domain\n",
+ sd->name, sd->child->name);
+#endif
+ return;
+ }
+
+ sge = fn(cpu);
+
+ /*
+ * Check that the per-cpu provided sd energy data is consistent for all
+ * cpus within the mask.
+ */
+ if (cpumask_weight(sched_group_span(sg)) > 1) {
+ struct cpumask mask;
+
+ cpumask_xor(&mask, sched_group_span(sg), get_cpu_mask(cpu));
+
+ for_each_cpu(i, &mask)
+ BUG_ON(sge != fn(i));
+ }
+
+ /*
+ * Check that energy efficiency (capacity/power) is monotonically
+ * decreasing in the capacity state vector with higher indexes.
+ */
+ for (i = 0; i < (sge->nr_cap_states - 1); i++) {
+ if (energy_eff(sge, i) > energy_eff(sge, i+1))
+ continue;
+#ifdef CONFIG_SCHED_DEBUG
+ pr_warn("WARN: cpu=%d, domain=%s: incr. energy eff %lu[%d]->%lu[%d]\n",
+ cpu, sd->name, energy_eff(sge, i), i,
+ energy_eff(sge, i+1), i+1);
+#else
+ pr_warn("WARN: cpu=%d: incr. energy eff %lu[%d]->%lu[%d]\n",
+ cpu, energy_eff(sge, i), i, energy_eff(sge, i+1), i+1);
+#endif
+ }
+
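+ /* All checks passed: attach the energy data to this group. */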
+ sd->groups->sge = sge;
+}
+
/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
/* Calculate CPU capacity for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
+ struct sched_domain_topology_level *tl = sched_domain_topology;
+
if (!cpumask_test_cpu(i, cpu_map))
continue;
- for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
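+ /* tl walks the topology levels in step with the sd hierarchy. */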
+ init_sched_groups_energy(i, sd, tl->energy);
claim_allocations(i, sd);
init_sched_groups_capacity(i, sd);
}