arch/powerpc/mm/numa.c
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
                                               unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify node id, iff we started creating NUMA nodes.
         * We want to continue from where we left off the last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}

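/*
 * Example (a sketch, not in the original source): booting with
 * "numa=fake=1G,3G" carves memory at those boundaries.  A region
 * ending below 1G leaves *nid untouched; the first region ending
 * above 1G bumps the fake node id to 1, and the first region ending
 * above 3G bumps it to 2:
 *
 *      unsigned int nid = 0;
 *      fake_numa_create_new_node(0x20000000UL >> PAGE_SHIFT, &nid);  -> nid == 0
 *      fake_numa_create_new_node(0x80000000UL >> PAGE_SHIFT, &nid);  -> nid == 1
 *      fake_numa_create_new_node(0x100000000UL >> PAGE_SHIFT, &nid); -> nid == 2
 */
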
/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
                                          struct node_active_region *node_ar)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                if (pfn >= start_pfn && pfn < end_pfn) {
                        node_ar->nid = nid;
                        node_ar->start_pfn = start_pfn;
                        node_ar->end_pfn = end_pfn;
                        break;
                }
        }
}

static void map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
        const u32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return distance;

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}

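/*
 * Worked example (hypothetical table contents): with a reference-point
 * depth of 4 and
 *
 *      distance_lookup_table[a] = { 3, 5, 7, 9 }
 *      distance_lookup_table[b] = { 4, 6, 7, 9 }
 *
 * entries 0 and 1 differ and entry 2 matches, so the loop doubles
 * LOCAL_DISTANCE (10) twice and __node_distance(a, b) returns 40.
 */
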
static void initialize_distance_lookup_table(int nid,
                const unsigned int *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                distance_lookup_table[nid][i] =
                        associativity[distance_ref_points[i]];
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (associativity[0] >= min_common_depth)
                nid = associativity[min_common_depth];

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 && associativity[0] >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);

out:
        return nid;
}

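/*
 * Example (a sketch with hypothetical values): an ibm,associativity
 * property is a counted list, cell 0 holding the number of domain
 * cells that follow.  With min_common_depth == 4 and a property of
 *
 *      { 5, 0, 0, 0, 2, 6 }
 *
 * associativity[0] (5) >= 4, so the nid is associativity[4] == 2.
 */
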
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const unsigned int *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *chosen;
        struct device_node *root;
        const char *vec5;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                        "ibm,associativity-reference-points",
                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

#define VEC5_AFFINITY_BYTE      5
#define VEC5_AFFINITY           0x80

        if (firmware_has_feature(FW_FEATURE_OPAL))
                form1_affinity = 1;
        else {
                chosen = of_find_node_by_path("/chosen");
                if (chosen) {
                        vec5 = of_get_property(chosen,
                                        "ibm,architecture-vec-5", NULL);
                        if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
                                                        VEC5_AFFINITY)) {
                                dbg("Using form 1 affinity\n");
                                form1_affinity = 1;
                        }
                }
        }

        if (form1_affinity) {
                depth = distance_ref_points[0];
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = distance_ref_points[1];
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

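/*
 * Example (hypothetical property contents): a form 1 machine might
 * expose ibm,associativity-reference-points = <0x4 0x4>, in which case
 * the code above returns depth 4, the index of the most significant
 * NUMA boundary in each ibm,associativity list.  A form 0 machine
 * takes the second entry of the property instead.
 */
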
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}

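/*
 * Example (a sketch, not in the original source): device tree
 * properties store 64-bit quantities as two big-endian 32-bit cells,
 * so with n == 2 a buffer of { 0x00000001, 0x00000000 } decodes to
 * 0x100000000 (4GB) and the cursor advances past both cells:
 *
 *      const unsigned int cells[] = { 0x1, 0x0 };
 *      const unsigned int *p = cells;
 *      unsigned long val = read_n_cells(2, &p);  -> val == 0x100000000
 */
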
struct of_drconf_cell {
        u64     base_addr;
        u32     drc_index;
        u32     reserved;
        u32     aa_index;
        u32     flags;
};

#define DRCONF_MEM_ASSIGNED     0x00000008
#define DRCONF_MEM_AI_INVALID   0x00000040
#define DRCONF_MEM_RESERVED     0x00000080

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
        const u32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = cp[0];
        drmem->reserved = cp[1];
        drmem->aa_index = cp[2];
        drmem->flags = cp[3];

        *cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries. Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
        const u32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = *prop++;

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}

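/*
 * Layout sketch (hypothetical cell values): with n_mem_addr_cells == 2
 * each entry occupies six cells, so a two-entry property looks like
 *
 *      { 2,                              number of entries
 *        0x0 0x0        1 0 0 0x8,       base 0x0, drc-index 1, ASSIGNED
 *        0x0 0x10000000 2 0 0 0x8 }      base 0x10000000, drc-index 2
 *
 * and of_get_drconf_memory() returns 2 with *dm pointing at the first
 * entry's base address cells.
 */
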
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = *prop++;
        aa->array_sz = *prop++;

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}

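/*
 * Layout sketch (hypothetical values): two lookup arrays of four
 * domain cells each would be encoded as
 *
 *      { 2, 4,  0 0 0 0,  0 0 0 1 }
 *
 * giving aa->n_arrays == 2 and aa->array_sz == 4, with aa->arrays
 * pointing at the first of the eight domain cells.
 */
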
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = aa->arrays[index];

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
        }

        return nid;
}

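/*
 * Example (continuing the hypothetical layout above): with
 * aa->array_sz == 4, min_common_depth == 4 and drmem->aa_index == 1,
 * the lookup reads aa->arrays[1 * 4 + 4 - 1] == aa->arrays[7], the
 * last cell of the second array, so the LMB lands on node 1.
 */
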
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
                                       unsigned long action,
                                       void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit. Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}

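/*
 * Example (hypothetical numbers): if DRAM ends at 0x40000000 (1GB), a
 * region covering 0x30000000-0x50000000 is trimmed to the 0x10000000
 * bytes below the limit, and a region starting at or above 0x40000000
 * comes back as size 0 and is discarded by the callers.
 */
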
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * read the counter from linux,drconf-usable-memory
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const u32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa;

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                 * or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                ((base + size) >> PAGE_SHIFT),
                                &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz, nid);
                } while (--ranges);
        }
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                                "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties. If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn), nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}

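/*
 * Sample output (hypothetical machine with two 8-cpu nodes):
 *
 *      Node 0 CPUs: 0-7
 *      Node 1 CPUs: 8-15
 *
 * Runs of contiguous cpus are printed as ranges; a hole in the cpumap
 * starts a new number.
 */
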
static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
                                        unsigned long align,
                                        unsigned long end_pfn)
{
        void *ret;
        int new_nid;
        unsigned long ret_paddr;

        ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret_paddr)
                ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);

        ret = __va(ret_paddr);

        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
         * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator. If this function is called for
         * node 5, then we know that all nodes <5 are using the
         * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
         * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                dbg("alloc_bootmem %p %lx\n", ret, size);
        }

        memset(ret, 0, size);
        return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
        struct pglist_data *node = NODE_DATA(nid);
        struct memblock_region *reg;

        for_each_memblock(reserved, reg) {
                unsigned long physbase = reg->base;
                unsigned long size = reg->size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
                unsigned long node_end_pfn = node->node_start_pfn +
                                             node->node_spanned_pages;

                /*
                 * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
                 * entire node.
                 */
                if (end_pfn <= node->node_start_pfn ||
                    start_pfn >= node_end_pfn)
                        continue;

                get_node_active_region(start_pfn, &node_ar);
                while (start_pfn < end_pfn &&
                        node_ar.start_pfn < node_ar.end_pfn) {
                        unsigned long reserve_size = size;
                        /*
                         * if reserved region extends past active region
                         * then trim size to active region
                         */
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - physbase;
                        /*
                         * Only worry about *this* node, others may not
                         * yet have valid NODE_DATA().
                         */
                        if (node_ar.nid == nid) {
                                dbg("reserve_bootmem %lx %lx nid=%d\n",
                                        physbase, reserve_size, node_ar.nid);
                                reserve_bootmem_node(NODE_DATA(node_ar.nid),
                                                physbase, reserve_size,
                                                BOOTMEM_DEFAULT);
                        }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
                         */
                        if (end_pfn <= node_ar.end_pfn)
                                break;

                        /*
                         * reserved region extends past the active region
                         *   get next active region that contains this
                         *   reserved region
                         */
                        start_pfn = node_ar.end_pfn;
                        physbase = start_pfn << PAGE_SHIFT;
                        size = size - reserve_size;
                        get_node_active_region(start_pfn, &node_ar);
                }
        }
}

void __init do_init_bootmem(void)
{
        int nid;

        min_low_pfn = 0;
        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /*
                 * Allocate the node structure node local if possible
                 *
                 * Be careful moving this around, as it relies on all
                 * previous nodes' bootmem to be initialized and have
                 * all reserved areas marked.
                 */
                NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);

                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

                init_bootmem_node(NODE_DATA(nid),
                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);
                /*
                 * Be very careful about moving this around. Future
                 * calls to careful_zallocation() depend on this getting
                 * done correctly.
                 */
                mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }

        init_bootmem_done = 1;

        /*
         * Now bootmem is initialised we can create the node to cpumask
         * lookup tables and setup the cpu callback to populate them.
         */
        setup_node_to_cpumask_map();

        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);

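/*
 * Usage sketch: the recognised forms of the boot parameter are
 *
 *      numa=off                disable NUMA setup entirely
 *      numa=debug              enable the dbg() messages in this file
 *      numa=fake=1G,3G         fake node boundaries at 1GB and 3GB
 *
 * and since matching is via strstr(), the options may be combined,
 * e.g. "numa=debug,fake=2G".
 */
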
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const u32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const u32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

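/*
 * Arithmetic example (hypothetical partition): 1024 drconf entries
 * with a 256MB LMB size give a hot-add ceiling of
 * 1024 * 0x10000000 = 0x4000000000 (256GB).
 */
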
/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);

/*
 * Record the current values of the associativity change counters that
 * the hypervisor maintains in each cpu's VPA, so that later changes
 * can be detected.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu, nr_cpus = 0;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        cpumask_clear(changes);

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_set_cpu(cpu, changes);
                        nr_cpus++;
                }
        }

        return nr_cpus;
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
        int i, nr_assoc_doms = 0;
        const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED       (0xffff)
#define VPHN_FIELD_MSB          (0x8000)
#define VPHN_FIELD_MASK         (~VPHN_FIELD_MSB)

        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
                if (*field == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
                        unpacked[i] = *((u32 *)field);
                        field += 2;
                } else if (*field & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
                        unpacked[i] = *field & VPHN_FIELD_MASK;
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
                        unpacked[i] = *((u32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }

        /* The first cell contains the length of the property */
        unpacked[0] = nr_assoc_doms;

        return nr_assoc_doms;
}

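/*
 * Packing example (hypothetical field values): a 16-bit field of
 * 0x8002 has the MSB set, so it unpacks on its own to the domain
 * number 2.  A field of 0x0001 followed by 0x0004 has the MSB clear,
 * so the two fields unpack together to the 32-bit value 0x00010004
 * (powerpc is big-endian).  A field of 0xffff marks the remaining
 * fields as unused.
 */
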
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                   unsigned int *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
        }

        return rc;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed.
 */
int arch_update_cpu_topology(void)
{
        int cpu, nid, old_nid;
        unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
        struct device *dev;

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                vphn_get_associativity(cpu, associativity);
                nid = associativity_to_nid(associativity);

                if (nid < 0 || !node_online(nid))
                        nid = first_online_node;

                old_nid = numa_cpu_lookup_table[cpu];

                /* Disable hotplug while we update the cpu
                 * masks and sysfs.
                 */
                get_online_cpus();
                unregister_cpu_under_node(cpu, old_nid);
                unmap_cpu_from_node(cpu);
                map_cpu_to_node(cpu, nid);
                register_cpu_under_node(cpu, nid);
                put_online_cpus();

                dev = get_cpu_device(cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }

        return 1;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (!vphn_enabled)
                return;
        if (update_cpu_associativity_changes_mask() > 0)
                topology_schedule_update();
        set_topology_timer();
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void set_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        add_timer(&topology_timer);
}

/*
 * Start polling for VPHN associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        /* Disabled until races with load balancing are fixed */
        if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
            get_lppaca()->shared_proc) {
                vphn_enabled = 1;
                setup_cpu_associativity_change_counters();
                init_timer_deferrable(&topology_timer);
                set_topology_timer();
                rc = 1;
        }

        return rc;
}
__initcall(start_topology_update);

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
        vphn_enabled = 0;
        return del_timer_sync(&topology_timer);
}
#endif /* CONFIG_PPC_SPLPAR */