resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
+ mdesc_populate_present_mask(mask);
mdesc_fill_in_cpu_data(mask);
for_each_cpu_mask(cpu, *mask) {
if (tlb_type == hypervisor)
return;
- of_populate_present_mask();
of_iterate_over_cpus(fill_in_one_cpu, 0);
smp_fill_in_sib_core_maps();
/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+ of_fill_in_cpu_data();
+ if (tlb_type == hypervisor)
+ mdesc_fill_in_cpu_data(CPU_MASK_ALL_PTR);
}
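
(Editorial note: taken together, the hunk above and the paging_init() hunk below reorder boot-time CPU-data setup: the device tree and MDESC code now only populate the present mask up front, real_setup_per_cpu_areas() runs after that, and the cpu_data() fill-in moves to the end of the per-cpu setup. The following is a minimal, stand-alone sketch of that resulting call order, with printf stubs standing in for the kernel routines and a plain flag standing in for the tlb_type == hypervisor test; it is an illustration, not kernel code.)

    #include <stdbool.h>
    #include <stdio.h>

    static bool hypervisor = true;  /* stands in for tlb_type == hypervisor */

    /* Stubs named after the kernel routines referenced in the hunks. */
    static void of_populate_present_mask(void)    { puts("OF: mark present cpus"); }
    static void mdesc_populate_present_mask(void) { puts("MDESC: mark present cpus"); }
    static void of_fill_in_cpu_data(void)         { puts("OF: fill cpu_data()"); }
    static void mdesc_fill_in_cpu_data(void)      { puts("MDESC: fill cpu_data()"); }

    /* Mirrors the tail of real_setup_per_cpu_areas() after the patch:
     * cpu_data() is filled in only once the per-cpu areas exist.
     */
    static void real_setup_per_cpu_areas(void)
    {
        puts("allocate per-cpu areas for all possible cpus");
        of_fill_in_cpu_data();
        if (hypervisor)
            mdesc_fill_in_cpu_data();
    }

    /* Mirrors the reordered portion of paging_init() after the patch. */
    int main(void)
    {
        puts("prom_build_devicetree()");
        of_populate_present_mask();
        if (hypervisor) {
            puts("sun4v_mdesc_init()");
            mdesc_populate_present_mask();
        }
        real_setup_per_cpu_areas();
        return 0;
    }

(The apparent motivation, consistent with the removed comment below and the retained "Once the OF device tree and MDESC have been setup" comment, is that the present mask can be known before the per-cpu areas are allocated, while the code that writes into per-cpu data structures is deferred until after that allocation.)
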
if (tlb_type == hypervisor)
sun4v_ktsb_register();
- /* We must setup the per-cpu areas before we pull in the
- * PROM and the MDESC. The code there fills in cpu and
- * other information into per-cpu data structures.
- */
- real_setup_per_cpu_areas();
-
prom_build_devicetree();
- of_fill_in_cpu_data();
+ of_populate_present_mask();
if (tlb_type == hypervisor) {
sun4v_mdesc_init();
- mdesc_fill_in_cpu_data(CPU_MASK_ALL_PTR);
+ mdesc_populate_present_mask(CPU_MASK_ALL_PTR);
}
+ real_setup_per_cpu_areas();
+
/* Once the OF device tree and MDESC have been setup, we know
* the list of possible cpus. Therefore we can allocate the
* IRQ stacks.