extern int __weak arch_sd_sibiling_asym_packing(void);
-struct sched_group_power {
- atomic_t ref;
- /*
- * CPU power of this group, SCHED_LOAD_SCALE being max power for a
- * single CPU.
- */
- unsigned int power, power_orig;
- unsigned long next_update;
- /*
- * Number of busy cpus in this group.
- */
- atomic_t nr_busy_cpus;
-
- unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
- struct sched_group *next; /* Must be a circular list */
- atomic_t ref;
-
- unsigned int group_weight;
- struct sched_group_power *sgp;
-
- /*
- * The CPUs this group covers.
- *
- * NOTE: this field is variable length. (Allocated dynamically
- * by attaching extra space to the end of the structure,
- * depending on how many CPUs the kernel has booted up with)
- */
- unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
- return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
- return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
- return cpumask_first(sched_group_cpus(group));
-}
-
struct sched_domain_attr {
int relax_domain_level;
};
extern int sched_domain_level_max;
+struct sched_group;
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);
+struct sched_group_power {
+ atomic_t ref;
+ /*
+ * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+ * single CPU.
+ */
+ unsigned int power, power_orig;
+ unsigned long next_update;
+ /*
+ * Number of busy cpus in this group.
+ */
+ atomic_t nr_busy_cpus;
+
+ unsigned long cpumask[0]; /* iteration mask */
+};
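
The power field is expressed in SCHED_LOAD_SCALE units, so one scale unit corresponds to the capacity of one full CPU. A minimal sketch of how a caller could turn that into a whole-CPU capacity estimate, using the kernel's existing DIV_ROUND_CLOSEST() helper; the helper name here is hypothetical and not part of this patch:

/*
 * Hypothetical helper (illustration only, not in this patch): converts
 * sgp->power into an approximate count of full-CPU capacities, per the
 * SCHED_LOAD_SCALE convention documented in the struct above.
 */
static inline unsigned int sgp_capacity_estimate(struct sched_group_power *sgp)
{
	return DIV_ROUND_CLOSEST(sgp->power, SCHED_LOAD_SCALE);
}
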
+
+struct sched_group {
+ struct sched_group *next; /* Must be a circular list */
+ atomic_t ref;
+
+ unsigned int group_weight;
+ struct sched_group_power *sgp;
+
+ /*
+ * The CPUs this group covers.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ */
+ unsigned long cpumask[0];
+};
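
Because cpumask is a variable-length tail, neither structure can be allocated with a bare sizeof; the allocation must append room for one cpumask. A minimal sketch, assuming the usual kzalloc_node() (linux/slab.h) and cpumask_size() helpers; the function name is illustrative only:

/*
 * Hypothetical allocation sketch (not part of this patch): reserve
 * cpumask_size() extra bytes for the variable-length tail.
 * struct sched_group_power is sized the same way for its iteration mask.
 */
static struct sched_group *sched_group_alloc_example(int node)
{
	return kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			    GFP_KERNEL, node);
}
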
+
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+ return to_cpumask(sg->cpumask);
+}
+
+/*
+ * Returns the mask of those CPUs in the group that are allowed to
+ * iterate up the domain tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+ return to_cpumask(sg->sgp->cpumask);
+}
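
Given both masks, the natural balance CPU for a group is the first CPU that the group covers and that is also present in the iteration mask. A hedged sketch of how group_balance_cpu() (declared below) could be expressed in those terms; this is an assumed implementation, not one this patch defines:

/*
 * Assumed implementation sketch of group_balance_cpu() (not part of
 * this patch): first cpu that is both in the group and permitted to
 * iterate up the domain tree.
 */
static inline int group_balance_cpu_sketch(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}
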
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
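
A short usage sketch tying the accessors together: since sched_group::next forms a circular list, a walk over a domain's groups starts at its first group and stops on wrap-around. sd->groups is assumed from the scheduler core and is not shown in this hunk:

/*
 * Hypothetical usage sketch (not part of this patch): visit every group
 * of a domain once, relying on the circular ->next list.
 */
static void visit_domain_groups_example(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;	/* assumed domain group list */

	do {
		pr_debug("group first cpu %u, weight %u\n",
			 group_first_cpu(sg), sg->group_weight);
		sg = sg->next;
	} while (sg != sd->groups);
}
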
+
extern int group_balance_cpu(struct sched_group *sg);
#endif /* CONFIG_SMP */