From: Ingo Molnar <mingo@elte.hu>
Date: Tue, 8 Jul 2008 09:59:23 +0000 (+0200)
Subject: Merge branch 'x86/numa' into x86/devel

Merge branch 'x86/numa' into x86/devel

Conflicts:

	arch/x86/Kconfig
	arch/x86/kernel/e820.c
	arch/x86/kernel/efi_64.c
	arch/x86/kernel/mpparse.c
	arch/x86/kernel/setup.c
	arch/x86/kernel/setup_32.c
	arch/x86/mm/init_64.c
	include/asm-x86/proto.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
---

2b4fa851b2f06fdb04cac808b57324f5e51e1578
diff --cc arch/x86/kernel/apic_64.c
index d7406aa1c985,4fd21f7d698c..e494809fc508
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@@ -1071,13 -1087,10 +1068,13 @@@ void __cpuinit generic_processor_info(i
  		 */
  		cpu = 0;
  	}
 +	if (apicid > max_physical_apicid)
 +		max_physical_apicid = apicid;
 +
  	/* are we being called early in kernel startup? */
- 	if (x86_cpu_to_apicid_early_ptr) {
- 		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
- 		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+ 	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
+ 		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
+ 		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
  
  		cpu_to_apicid[cpu] = apicid;
  		bios_cpu_apicid[cpu] = apicid;
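
(Note on the pattern resolved above: the merged code keeps the x86/numa
branch's early_per_cpu_ptr() accessors. Until the per-CPU areas exist, the
apicid maps live in static init tables reached through an "early" pointer;
once setup_per_cpu_areas() has copied those tables into per-CPU storage, the
pointer is cleared and per_cpu() access takes over. Below is a minimal
user-space sketch of that hand-off, with demo_* names invented here and
standing in for the kernel's macros.)

#include <stdio.h>

#define DEMO_NR_CPUS	4
#define DEMO_BAD_APICID	0xFFFFu

/* Static table, authoritative early in boot (GCC range initializer). */
static unsigned short demo_apicid_init[DEMO_NR_CPUS] = {
	[0 ... DEMO_NR_CPUS - 1] = DEMO_BAD_APICID,
};

/* "Early" pointer: non-NULL only until the per-CPU areas are set up. */
static unsigned short *demo_apicid_early_ptr = demo_apicid_init;

/* Stand-in for the real per-CPU storage. */
static unsigned short demo_apicid_percpu[DEMO_NR_CPUS];

static void demo_set_apicid(int cpu, unsigned short apicid)
{
	if (demo_apicid_early_ptr)		/* early in kernel startup? */
		demo_apicid_early_ptr[cpu] = apicid;
	else					/* per-CPU areas are live */
		demo_apicid_percpu[cpu] = apicid;
}

static void demo_setup_per_cpu_areas(void)
{
	int cpu;

	/* Copy the early table into per-CPU storage, then retire it. */
	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		demo_apicid_percpu[cpu] = demo_apicid_init[cpu];
	demo_apicid_early_ptr = NULL;
}

int main(void)
{
	demo_set_apicid(0, 0x10);	/* lands in the static table */
	demo_setup_per_cpu_areas();
	demo_set_apicid(1, 0x11);	/* lands in per-CPU storage */
	printf("cpu0 %#x, cpu1 %#x\n",
	       demo_apicid_percpu[0], demo_apicid_percpu[1]);
	return 0;
}

(The real macros also hide the SMP/!SMP split; the point here is only the
early-table-then-per-cpu hand-off that generic_processor_info() relies on.)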
diff --cc arch/x86/kernel/nmi_64.c
index 0060e44e8989,2861b9408ac9..d62f3b66b529
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@@ -90,9 -88,9 +90,9 @@@ int __init check_nmi_watchdog(void
  	if (!atomic_read(&nmi_active))
  		return 0;
  
- 	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
+ 	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
  	if (!prev_nmi_count)
 -		return -1;
 +		goto error;
  
  	printk(KERN_INFO "Testing NMI watchdog ... ");
  
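
(The nmi_64.c resolution combines both parents: sizing prev_nmi_count by
nr_cpu_ids, the boot-time count of possible CPU ids, comes from one side,
while the "goto error" failure path comes from the other. A hedged sketch of
what the sizing change buys, with made-up numbers; NR_CPUS here is just a
stand-in for the compile-time ceiling:)

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS	4096		/* compile-time ceiling, e.g. a distro kernel */

int main(void)
{
	int nr_cpu_ids = 8;	/* hypothetical: possible CPU ids on this box */
	int *prev_nmi_count;

	/* Old: NR_CPUS slots regardless of the machine. */
	printf("NR_CPUS sizing:    %zu bytes\n", NR_CPUS * sizeof(int));
	/* New: one slot per CPU id that can actually occur. */
	printf("nr_cpu_ids sizing: %zu bytes\n", nr_cpu_ids * sizeof(int));

	prev_nmi_count = malloc(nr_cpu_ids * sizeof(int));
	if (!prev_nmi_count)
		return 1;	/* the kernel jumps to its error label here */
	free(prev_nmi_count);
	return 0;
}
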
diff --cc arch/x86/kernel/setup.c
index 5b0de38cde48,d4eaa4eb481d..ebb0a2bcdc08
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@@ -17,12 -17,8 +17,9 @@@ unsigned int num_processors
  unsigned disabled_cpus __cpuinitdata;
  /* Processor that is doing the boot up */
  unsigned int boot_cpu_physical_apicid = -1U;
 +unsigned int max_physical_apicid;
  EXPORT_SYMBOL(boot_cpu_physical_apicid);
  
- DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
- EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
- 
  /* Bitmask of physically existing CPUs */
  physid_mask_t phys_cpu_present_map;
  #endif
@@@ -139,27 -206,175 +207,200 @@@ void __init setup_per_cpu_areas(void
  
  #endif
  
 +void __init parse_setup_data(void)
 +{
 +	struct setup_data *data;
 +	u64 pa_data;
 +
 +	if (boot_params.hdr.version < 0x0209)
 +		return;
 +	pa_data = boot_params.hdr.setup_data;
 +	while (pa_data) {
 +		data = early_ioremap(pa_data, PAGE_SIZE);
 +		switch (data->type) {
 +		case SETUP_E820_EXT:
 +			parse_e820_ext(data, pa_data);
 +			break;
 +		default:
 +			break;
 +		}
 +#ifndef CONFIG_DEBUG_BOOT_PARAMS
 +		free_early(pa_data, pa_data+sizeof(*data)+data->len);
 +#endif
 +		pa_data = data->next;
 +		early_iounmap(data, PAGE_SIZE);
 +	}
 +}
++
+ #ifdef X86_64_NUMA
+ 
+ /*
+  * Allocate node_to_cpumask_map based on number of available nodes
+  * Requires node_possible_map to be valid.
+  *
+  * Note: node_to_cpumask() is not valid until after this is done.
+  */
+ static void __init setup_node_to_cpumask_map(void)
+ {
+ 	unsigned int node, num = 0;
+ 	cpumask_t *map;
+ 
+ 	/* setup nr_node_ids if not done yet */
+ 	if (nr_node_ids == MAX_NUMNODES) {
+ 		for_each_node_mask(node, node_possible_map)
+ 			num = node;
+ 		nr_node_ids = num + 1;
+ 	}
+ 
+ 	/* allocate the map */
+ 	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+ 
+ 	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
+ 		map, nr_node_ids);
+ 
+ 	/* node_to_cpumask() will now work */
+ 	node_to_cpumask_map = map;
+ }
+ 
+ void __cpuinit numa_set_node(int cpu, int node)
+ {
+ 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+ 
+ 	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
+ 		cpu_pda(cpu)->nodenumber = node;
+ 
+ 	if (cpu_to_node_map)
+ 		cpu_to_node_map[cpu] = node;
+ 
+ 	else if (per_cpu_offset(cpu))
+ 		per_cpu(x86_cpu_to_node_map, cpu) = node;
+ 
+ 	else
+ 		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
+ }
+ 
+ void __cpuinit numa_clear_node(int cpu)
+ {
+ 	numa_set_node(cpu, NUMA_NO_NODE);
+ }
+ 
+ #ifndef CONFIG_DEBUG_PER_CPU_MAPS
+ 
+ void __cpuinit numa_add_cpu(int cpu)
+ {
+ 	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+ }
+ 
+ void __cpuinit numa_remove_cpu(int cpu)
+ {
+ 	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+ }
+ 
+ #else /* CONFIG_DEBUG_PER_CPU_MAPS */
+ 
+ /*
+  * --------- debug versions of the numa functions ---------
+  */
+ static void __cpuinit numa_set_cpumask(int cpu, int enable)
+ {
+ 	int node = cpu_to_node(cpu);
+ 	cpumask_t *mask;
+ 	char buf[64];
+ 
+ 	if (node_to_cpumask_map == NULL) {
+ 		printk(KERN_ERR "node_to_cpumask_map NULL\n");
+ 		dump_stack();
+ 		return;
+ 	}
+ 
+ 	mask = &node_to_cpumask_map[node];
+ 	if (enable)
+ 		cpu_set(cpu, *mask);
+ 	else
+ 		cpu_clear(cpu, *mask);
+ 
+ 	cpulist_scnprintf(buf, sizeof(buf), *mask);
+ 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+ 		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+ }
+ 
+ void __cpuinit numa_add_cpu(int cpu)
+ {
+ 	numa_set_cpumask(cpu, 1);
+ }
+ 
+ void __cpuinit numa_remove_cpu(int cpu)
+ {
+ 	numa_set_cpumask(cpu, 0);
+ }
+ 
+ int cpu_to_node(int cpu)
+ {
+ 	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+ 		printk(KERN_WARNING
+ 			"cpu_to_node(%d): usage too early!\n", cpu);
+ 		dump_stack();
+ 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+ 	}
+ 	return per_cpu(x86_cpu_to_node_map, cpu);
+ }
+ EXPORT_SYMBOL(cpu_to_node);
+ 
+ /*
+  * Same function as cpu_to_node() but used if called before the
+  * per_cpu areas are set up.
+  */
+ int early_cpu_to_node(int cpu)
+ {
+ 	if (early_per_cpu_ptr(x86_cpu_to_node_map))
+ 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+ 
+ 	if (!per_cpu_offset(cpu)) {
+ 		printk(KERN_WARNING
+ 			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+ 		dump_stack();
+ 		return NUMA_NO_NODE;
+ 	}
+ 	return per_cpu(x86_cpu_to_node_map, cpu);
+ }
+ 
+ /*
+  * Returns a pointer to the bitmask of CPUs on Node 'node'.
+  */
+ cpumask_t *_node_to_cpumask_ptr(int node)
+ {
+ 	if (node_to_cpumask_map == NULL) {
+ 		printk(KERN_WARNING
+ 			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+ 			node);
+ 		dump_stack();
+ 		return &cpu_online_map;
+ 	}
+ 	BUG_ON(node >= nr_node_ids);
+ 	return &node_to_cpumask_map[node];
+ }
+ EXPORT_SYMBOL(_node_to_cpumask_ptr);
+ 
+ /*
+  * Returns a bitmask of CPUs on Node 'node'.
+  */
+ cpumask_t node_to_cpumask(int node)
+ {
+ 	if (node_to_cpumask_map == NULL) {
+ 		printk(KERN_WARNING
+ 			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+ 		dump_stack();
+ 		return cpu_online_map;
+ 	}
+ 	BUG_ON(node >= nr_node_ids);
+ 	return node_to_cpumask_map[node];
+ }
+ EXPORT_SYMBOL(node_to_cpumask);
+ 
+ /*
+  * --------- end of debug versions of the numa functions ---------
+  */
+ 
+ #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+ 
+ #endif /* X86_64_NUMA */
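
(Note on the NUMA code merged in above: setup_node_to_cpumask_map() sizes
the map by nr_node_ids, computed as the highest node number in
node_possible_map plus one rather than a population count, so the array
stays directly indexable by node id even when node numbering has holes;
that is also why the debug accessors can BUG_ON(node >= nr_node_ids). A
stand-alone sketch of the computation, using a hypothetical 64-bit mask in
place of a real nodemask_t:)

#include <stdio.h>

#define MAX_NUMNODES	64

int main(void)
{
	/* Hypothetical node_possible_map: nodes 0, 1 and 4 exist. */
	unsigned long long node_possible_map = 0x13ULL;
	unsigned int node, num = 0, nr_node_ids;

	/* Mirror setup_node_to_cpumask_map(): remember the last set bit. */
	for (node = 0; node < MAX_NUMNODES; node++)
		if (node_possible_map & (1ULL << node))
			num = node;
	nr_node_ids = num + 1;

	/* The kernel then does alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)). */
	printf("nr_node_ids = %u (5, not 3: nodes 2 and 3 are holes)\n",
	       nr_node_ids);
	return 0;
}
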
diff --cc arch/x86/kernel/setup_32.c
index 7e06ecd83174,ccd5f5cdbbe6..a9b19ad24edb
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@@ -659,19 -731,11 +659,7 @@@ static void set_mca_bus(int x
  static void set_mca_bus(int x) { }
  #endif
  
- #ifdef CONFIG_NUMA
- /*
-  * In the golden day, when everything among i386 and x86_64 will be
-  * integrated, this will not live here
-  */
- void *x86_cpu_to_node_map_early_ptr;
- int x86_cpu_to_node_map_init[NR_CPUS] = {
- 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
- };
- DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
- #endif
- 
 -/* Overridden in paravirt.c if CONFIG_PARAVIRT */
 -char * __init __attribute__((weak)) memory_setup(void)
 -{
 -	return machine_specific_memory_setup();
 -}
 +static void probe_roms(void);
  
  /*
   * Determine if we were loaded by an EFI loader.  If so, then we have also been
@@@ -860,24 -869,12 +848,12 @@@ void __init setup_arch(char **cmdline_p
  	relocate_initrd();
  #endif
  
 -	paravirt_post_allocator_init();
 -
 -	dmi_scan_machine();
 +	remapped_pgdat_init();
 +	sparse_init();
 +	zone_sizes_init();
  
 -	io_delay_init();
 +	paravirt_post_allocator_init();
  
- #ifdef CONFIG_X86_SMP
- 	/*
- 	 * setup to use the early static init tables during kernel startup
- 	 * X86_SMP will exclude sub-arches that don't deal well with it.
- 	 */
- 	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
- 	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
- #ifdef CONFIG_NUMA
- 	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
- #endif
- #endif
- 
  #ifdef CONFIG_X86_GENERICARCH
  	generic_apic_probe();
  #endif
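
(One more sketch, for the parse_setup_data() helper carried in by the
setup.c hunk above: it walks a boot-protocol linked list whose next links
are physical addresses, mapping each node with early_ioremap(), dispatching
on its type, and reading the next link before the mapping is dropped. The
user-space model below fakes physical memory with an in-memory arena; the
demo_* names are inventions of this sketch:)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified struct setup_data from the x86 boot protocol. */
struct demo_setup_data {
	uint64_t next;	/* "physical" address of next node; 0 ends the list */
	uint32_t type;
	uint32_t len;
};

#define DEMO_SETUP_E820_EXT	1

static _Alignas(uint64_t) uint8_t arena[256];	/* pretend physical memory */

/* Stand-in for early_ioremap(): turn a physical address into a pointer. */
static struct demo_setup_data *demo_map(uint64_t pa)
{
	return (struct demo_setup_data *)(arena + pa);
}

int main(void)
{
	struct demo_setup_data n0 = { .next = 64, .type = DEMO_SETUP_E820_EXT };
	struct demo_setup_data n1 = { .next = 0,  .type = 99 };
	uint64_t pa = 16;		/* like boot_params.hdr.setup_data */

	memcpy(arena + 16, &n0, sizeof(n0));
	memcpy(arena + 64, &n1, sizeof(n1));

	while (pa) {
		struct demo_setup_data *data = demo_map(pa);

		switch (data->type) {
		case DEMO_SETUP_E820_EXT:
			printf("%#llx: extended e820 data\n",
			       (unsigned long long)pa);
			break;
		default:
			printf("%#llx: unhandled type %u\n",
			       (unsigned long long)pa, data->type);
			break;
		}
		pa = data->next;	/* read before "unmapping" the node */
	}
	return 0;
}
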