From: Heiko Carstens
Date: Fri, 2 Dec 2016 09:38:37 +0000 (+0100)
Subject: s390/topology: use cpu_topology array instead of per cpu variable
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=30fc4ca2a8ab508d160a917b89b7e1c27f893354;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

s390/topology: use cpu_topology array instead of per cpu variable

CPU topology information like the cpu to node mapping must be set up in
setup_arch already. Topology information is currently made available via
a per cpu variable; this however will not work once the initialization is
moved to setup_arch, since the generic percpu setup is done much later.
Therefore convert back to a cpu_topology array.

Reviewed-by: Michael Holzheu
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---

diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f5571ca2b..bc6f45421c98 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,18 +22,17 @@ struct cpu_topology_s390 {
 	cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-	(&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
 
@@ -65,7 +64,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-	return per_cpu(cpu_topology, cpu).node_id;
+	return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8705ee66c087..7169d112c91a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -41,15 +41,15 @@ static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
 /*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
  * protected by "sched_domains_mutex".
  */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 static struct mask_info drawer_info;
 
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -97,7 +97,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			topo = &per_cpu(cpu_topology, lcpu + i);
+			topo = &cpu_topology[lcpu + i];
 			topo->drawer_id = drawer->id;
 			topo->book_id = book->id;
 			topo->socket_id = socket->id;
@@ -220,7 +220,7 @@ static void update_cpu_masks(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		topo = &per_cpu(cpu_topology, cpu);
+		topo = &cpu_topology[cpu];
 		topo->thread_mask = cpu_thread_map(cpu);
 		topo->core_mask = cpu_group_map(&socket_info, cpu);
 		topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -394,23 +394,23 @@ int topology_cpu_init(struct cpu *cpu)
 
 static const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).thread_mask;
+	return &cpu_topology[cpu].thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).core_mask;
+	return &cpu_topology[cpu].core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).book_mask;
+	return &cpu_topology[cpu].book_mask;
 }
 
 static const struct cpumask *cpu_drawer_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).drawer_mask;
+	return &cpu_topology[cpu].drawer_mask;
 }
 
 static int __init early_parse_topology(char *p)
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index b83109328fec..2ed27e8eb4d4 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -355,7 +355,7 @@ static struct toptree *toptree_from_topology(void)
 
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 	for_each_online_cpu(cpu) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
 		book = toptree_get_child(drawer, top->book_id);
@@ -378,7 +378,7 @@ static void topology_add_core(struct toptree *core)
 	int cpu;
 
 	for_each_cpu(cpu, &core->mask) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
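
For illustration, the ordering argument in the commit message can be modelled
in plain userspace C: a statically sized array is addressable as soon as the
image exists, whereas per cpu style storage only works after the per cpu areas
have been set up, which happens later than setup_arch. The sketch below is
only a model and not part of the patch; setup_arch_model() and
setup_per_cpu_areas_model() are hypothetical stand-ins for the real boot
sequence, and the dynamically allocated pointer merely mimics how per cpu
storage is unusable before its setup has run.

/*
 * Illustrative userspace model only -- not kernel code.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct cpu_topology_model {
	int node_id;
};

/* "Per cpu variable" style: storage only exists once the per cpu
 * areas have been allocated (models the generic percpu setup). */
static struct cpu_topology_model *percpu_area;	/* NULL until late setup */

/* "Array" style: statically sized storage, valid from the start. */
static struct cpu_topology_model cpu_topology[NR_CPUS];

static void setup_per_cpu_areas_model(void)
{
	percpu_area = calloc(NR_CPUS, sizeof(*percpu_area));
}

static void setup_arch_model(void)
{
	/* Runs before setup_per_cpu_areas_model(): the array works... */
	cpu_topology[0].node_id = 0;
	/* ...while the per cpu style storage does not exist yet. */
	assert(percpu_area == NULL);
}

int main(void)
{
	setup_arch_model();		/* early: array already usable */
	setup_per_cpu_areas_model();	/* later: per cpu storage appears */
	printf("cpu 0 -> node %d\n", cpu_topology[0].node_id);
	free(percpu_area);
	return 0;
}

The cost of the array is a fixed NR_CPUS-sized footprint in the kernel image
regardless of how many CPUs are present; the patch accepts that in exchange
for topology data (e.g. cpu_to_node()) being usable from setup_arch onwards.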