/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...)	do { if (numa_debug) printk(KERN_INFO args); } while (0)

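/*
 * Forward and reverse CPU <-> node mappings, plus the per-node
 * pglist_data pointers: numa_cpu_lookup_table maps a logical CPU to
 * its node, and node_to_cpumask_map gives the set of CPUs on each node.
 */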
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

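/*
 * Fake NUMA is driven by the "numa=fake=" command line option (parsed
 * into cmdline by early_numa() below), which takes a comma-separated
 * list of ascending memory boundaries.  For example, "numa=fake=1G,3G"
 * splits memory into fake node 0 below 1G, node 1 from 1G up to 3G,
 * and node 2 above that.  Each call checks whether end_pfn has crossed
 * the next boundary and, if so, starts a new fake node.
 */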
static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

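/*
 * With form 1 affinity, the distance doubles for each leading
 * reference-point level at which the two nodes' associativity domains
 * differ.  For example, with distance_ref_points_depth == 2 and
 * LOCAL_DISTANCE == 10, the possible results are 10 (the nodes match
 * at the first reference point), 20 (they differ at the first but
 * match at the second), and 40 (they differ at both).
 */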
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}

static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

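/*
 * Concatenate n 32-bit device tree cells, most significant first, into
 * a single value.  With n == 2, the cell pair (0x1, 0x80000000) reads
 * back as 0x180000000, and *buf is left pointing at the next cell.
 */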
static unsigned long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

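/*
 * Each ibm,dynamic-memory list entry consists of n_mem_addr_cells of
 * base address followed by four more cells: drc-index, reserved,
 * associativity-list index and flags, which read_drconf_cell() below
 * unpacks into a struct of_drconf_cell.
 */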
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N of memblock
 * list entries, followed by the N entries themselves. Each memblock list
 * entry contains information as laid out in the of_drconf_cell struct.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

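/*
 * Hotplug notifier: place a CPU on its node as it comes up, and drop
 * the mapping again when it dies or its bring-up is cancelled.
 */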
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT), &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
}

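/*
 * Walk the device tree, online a node for every CPU and memory region
 * with usable associativity information, and hand each memory range to
 * memblock.  Returns 0 on success, or -1 when no NUMA topology could be
 * derived (NUMA disabled by the user, or no usable reference points).
 */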
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

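/*
 * Fallback when no usable NUMA information is present: put every
 * memblock region on node 0, unless fake NUMA boundaries carve it up.
 */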
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator. If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

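/*
 * Replay memblock's reserved regions into this node's bootmem allocator
 * so that early reservations survive the handover, clipping each region
 * to the active ranges it actually intersects on this node.
 */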
static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 * get next active region that contains this
			 * reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

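/*
 * Set up bootmem for every online node: allocate NODE_DATA and the
 * bootmem bitmap node-locally where possible, mark reserved regions,
 * and register the CPU hotplug notifier once the maps exist.
 */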
void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around. Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

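/*
 * "numa=" command line handling.  The recognized forms, which may be
 * combined in one option string: "numa=off" disables NUMA, "numa=debug"
 * enables the dbg() messages above, and "numa=fake=<boundary>[,...]"
 * feeds the fake NUMA machinery (see fake_numa_create_new_node()).
 */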
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
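/*
 * One entry per CPU whose home node changed; the entries are chained
 * into a singly linked list and handed to stop_machine() so that all
 * maps are updated in one shot.
 */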
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Snapshot the hypervisor's current associativity change counters so
 * that later polling can detect changes against this baseline.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
		}
	}

	return cpumask_weight(changes);
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

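/*
 * Each 16-bit field of the packed H_HOME_NODE_ASSOCIATIVITY return
 * value is one of: 0xffff (unused), a value with the MSB set (a 15-bit
 * domain number), or the high half of a full 32-bit domain number that
 * spans two fields.  For example, a field of 0x8002 unpacks to domain 2.
 */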
/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}

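/*
 * H_HOME_NODE_ASSOCIATIVITY packs the domain numbers into the hcall
 * return registers, which vphn_unpack_associativity() above turns into
 * ibm,associativity-style cells.  The flags value of 1 appears to
 * select the per-processor form of the query; see PAPR for the
 * authoritative flag definitions.
 */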
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = get_cpu();

	for (update = data; update; update = update->next) {
		if (cpu != update->cpu)
			continue;

		unregister_cpu_under_node(update->cpu, update->old_nid);
		unmap_cpu_from_node(update->cpu);
		map_cpu_to_node(update->cpu, update->new_nid);
		vdso_getcpu_init();
		register_cpu_under_node(update->cpu, update->new_nid);
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, changed = 0;
	struct topology_update_data *updates, *ud;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, i = 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		ud = &updates[i++];
		ud->cpu = cpu;
		vphn_get_associativity(cpu, associativity);
		ud->new_nid = associativity_to_nid(associativity);

		if (ud->new_nid < 0 || !node_online(ud->new_nid))
			ud->new_nid = first_online_node;

		ud->old_nid = numa_cpu_lookup_table[cpu];
		cpumask_set_cpu(cpu, &updated_cpus);

		if (i < weight)
			ud->next = &updates[i];
	}

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	for (ud = &updates[0]; ud; ud = ud->next) {
		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

	kfree(updates);
	return changed;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_prop_reconfig *update;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		update = (struct of_prop_reconfig *)data;
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   get_lppaca()->shared_proc) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Stop tracking associativity changes: unregister the PRRN device tree
 * notifier or disable VPHN polling, whichever is active.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

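/*
 * /proc/powerpc/topology_updates accepts "on" or "off" to start or stop
 * topology update handling at runtime, e.g.:
 *
 *	echo on > /proc/powerpc/topology_updates
 */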
static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EFAULT;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};

static int topology_update_init(void)
{
	start_topology_update();
	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */