when dealing with UltraSPARC cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
source "kernel/Kconfig.preempt"
config CMDLINE_BOOL
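For reference (not part of this patch): with SCHED_MC defaulting to y, the quickest sanity check on a running kernel is to look for the symbol in the built-in config. A minimal user-space sketch, assuming the kernel exposes /proc/config.gz (CONFIG_IKCONFIG_PROC) and linking with -lz:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

/* Print any CONFIG_SCHED_MC line from the in-kernel config. */
int main(void)
{
	gzFile f = gzopen("/proc/config.gz", "r");
	char line[256];

	if (!f) {
		fprintf(stderr, "cannot open /proc/config.gz\n");
		return 1;
	}
	while (gzgets(f, line, (int)sizeof(line)))
		if (!strncmp(line, "CONFIG_SCHED_MC", 15))
			fputs(line, stdout);
	gzclose(f);
	return 0;
}

On a kernel built from this patch the expected output is a single CONFIG_SCHED_MC=y line.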
}
}
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+	int i;
+
+	for (i = 0; i < mp->num_arcs; i++) {
+		struct mdesc_node *t = mp->arcs[i].arc;
+		const u64 *id;
+
+		if (strcmp(mp->arcs[i].name, "back"))
+			continue;
+
+		if (strcmp(t->name, "cpu"))
+			continue;
+
+		id = md_get_property(t, "id", NULL);
+		if (*id < NR_CPUS)
+			cpu_data(*id).proc_id = proc_id;
+	}
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+	struct mdesc_node *mp;
+	int idx;
+
+	idx = 0;
+	md_for_each_node_by_name(mp, exec_unit_name) {
+		const char *type;
+		int len;
+
+		type = md_get_property(mp, "type", &len);
+		if (!find_in_proplist(type, "int", len) &&
+		    !find_in_proplist(type, "integer", len))
+			continue;
+
+		mark_proc_ids(mp, idx);
+
+		idx++;
+	}
+}
+
+static void __init set_proc_ids(void)
+{
+	__set_proc_ids("exec_unit");
+	__set_proc_ids("exec-unit");
+}
+
static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
{
u64 val;
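For reference (not part of this patch): __set_proc_ids() only walks execution-unit nodes whose "type" property lists "int" or "integer", and find_in_proplist() itself is outside this hunk. Assuming it matches a string within a NUL-separated property list, a rough user-space model of that check is:

#include <stdio.h>
#include <string.h>

/*
 * Match a string against a NUL-separated list of strings, the behaviour the
 * "type" check in __set_proc_ids() appears to rely on.  This is an
 * assumption about find_in_proplist(), not a copy of it.
 */
static int match_in_proplist(const char *list, const char *match, int len)
{
	while (len > 0) {
		int l;

		if (!strcmp(list, match))
			return 1;
		l = strlen(list) + 1;
		list += l;
		len -= l;
	}
	return 0;
}

int main(void)
{
	/* "ihw\0int\0" is a made-up property value for illustration. */
	const char type[] = "ihw\0int";

	printf("%d\n", match_in_proplist(type, "int", sizeof(type)));
	return 0;
}

Each matching exec-unit node then gets a distinct proc_id, and mark_proc_ids() copies it to every CPU reachable over a "back" arc.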
#endif
c->core_id = 0;
+		c->proc_id = -1;
}
set_core_ids();
+	set_proc_ids();
smp_fill_in_sib_core_maps();
}
cpu_data(cpuid).core_id = 0;
}
+	cpu_data(cpuid).proc_id = -1;
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
		unsigned int j;
		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_sibling_map[i]);
+			cpu_set(i, cpu_core_map[i]);
			continue;
		}
		for_each_possible_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
+				cpu_set(j, cpu_core_map[i]);
+		}
+	}
+
+	for_each_possible_cpu(i) {
+		unsigned int j;
+
+		if (cpu_data(i).proc_id == -1) {
+			cpu_set(i, cpu_sibling_map[i]);
+			continue;
+		}
+
+		for_each_possible_cpu(j) {
+			if (cpu_data(i).proc_id ==
+			    cpu_data(j).proc_id)
				cpu_set(j, cpu_sibling_map[i]);
		}
	}
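For reference (not part of this patch): after this hunk, cpu_core_map[] groups CPUs by core_id while cpu_sibling_map[] is narrowed to CPUs that share an integer execution unit (same proc_id). A toy user-space model of the two passes, with made-up core_id/proc_id values, shows the resulting masks:

#include <stdio.h>

#define NCPUS 8

/* Made-up per-cpu ids: two cores of four strands, one int unit per pair. */
struct cpu { int core_id; int proc_id; };

int main(void)
{
	struct cpu cpus[NCPUS] = {
		{ 1, 0 }, { 1, 0 }, { 1, 1 }, { 1, 1 },
		{ 2, 2 }, { 2, 2 }, { 2, 3 }, { 2, 3 },
	};
	unsigned int core_map[NCPUS] = { 0 };
	unsigned int sibling_map[NCPUS] = { 0 };
	int i, j;

	/* First pass: group by core_id; unknown (0) means only self. */
	for (i = 0; i < NCPUS; i++) {
		if (cpus[i].core_id == 0) {
			core_map[i] |= 1u << i;
			continue;
		}
		for (j = 0; j < NCPUS; j++)
			if (cpus[i].core_id == cpus[j].core_id)
				core_map[i] |= 1u << j;
	}

	/* Second pass: group by proc_id; unknown (-1) means only self. */
	for (i = 0; i < NCPUS; i++) {
		if (cpus[i].proc_id == -1) {
			sibling_map[i] |= 1u << i;
			continue;
		}
		for (j = 0; j < NCPUS; j++)
			if (cpus[i].proc_id == cpus[j].proc_id)
				sibling_map[i] |= 1u << j;
	}

	for (i = 0; i < NCPUS; i++)
		printf("cpu%d: core_map=%#04x sibling_map=%#04x\n",
		       i, core_map[i], sibling_map[i]);
	return 0;
}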
unsigned int ecache_size;
unsigned int ecache_line_size;
int core_id;
-	unsigned int __pad3;
+	int proc_id;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_possible_map phys_cpu_present_map
extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
/*
* General functions that each host system must provide.
#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H
+#ifdef CONFIG_SMP
#include <asm/spitfire.h>
-#define smt_capable() (tlb_type == hypervisor)
-
-#include <asm-generic/topology.h>
+#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
+#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define mc_capable() (tlb_type == hypervisor)
+#define smt_capable() (tlb_type == hypervisor)
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/topology.h>
+
+#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
#endif /* _ASM_SPARC64_TOPOLOGY_H */
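
For reference (not part of this patch): topology_physical_package_id(), topology_core_id(), topology_core_siblings() and topology_thread_siblings() are consumed by the generic drivers/base/topology.c driver, so the new proc_id and cpu_core_map[] should surface under sysfs. A small reader, assuming the standard /sys/devices/system/cpu/cpuN/topology layout:

#include <stdio.h>

/*
 * Read the sysfs topology attributes that the macros above feed through
 * drivers/base/topology.c.  The cpu number is hard-coded to 0 here.
 */
static void show(const char *attr)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/topology/%s", attr);
	f = fopen(path, "r");
	if (!f) {
		printf("%-20s <unavailable>\n", attr);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-20s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	show("physical_package_id");	/* topology_physical_package_id() */
	show("core_id");		/* topology_core_id() */
	show("core_siblings");		/* topology_core_siblings() */
	show("thread_siblings");	/* topology_thread_siblings() */
	return 0;
}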