/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
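
/* With MAX_PHYS_ADDRESS of 2^42 and 256MB (2^28) chunks that works
 * out to 16384 bits, i.e. 2K bytes of bitmap.  Physical address PA is
 * tracked by bit (PA >> 28); mark_kpte_bitmap() below sets these bits
 * for every naturally aligned 256MB stretch of RAM.
 */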

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
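
/* The two helpers below update the dirty bit and owning cpu in
 * page->flags atomically: they loop on a casx compare-and-swap so
 * that a racing update of page->flags elsewhere is simply retried.
 */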

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}
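
/* cheetah_plus and sun4v take a physical TSB address here because
 * their TSB access sequences are rewritten at boot by
 * tsb_phys_patch() to use physical addressing.
 */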

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
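
/* Each mapping locked above is one 4MB (0x400000 byte) TTE; the
 * kernel image occupies num_kernel_image_mappings consecutive 4MB
 * pages starting at KERNBASE.
 */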

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
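
/* A context value is the version (upper bits, CTX_VERSION_MASK)
 * combined with a context number in the low CTX_NR_BITS bits.  When
 * the number space wraps we bump the version, which makes every mm's
 * cached context fail the version mis-match test in mmu_context.h
 * and get reallocated.
 */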

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)
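
/* Booting with "numa=off" disables NUMA parsing entirely (so
 * bootmem_init_numa() fails and we fall back to the non-NUMA path),
 * and "numa=debug" makes the numadbg() statements below print to the
 * console at KERN_INFO level.
 */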

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

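/* Return the end of the longest prefix of [start, end) that lies in a
 * single NUMA node, storing that node's id in *nid (-1 if no node
 * mask matches).  Callers walk a region node by node by looping with
 * start = nid_range(start, end, &nid); the non-NUMA variant below
 * trivially reports node 0 for the whole range.
 */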
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}
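
/* Map an I/O device node to a NUMA node: pull the cfg-handle out of
 * its "reg" property, then find the machine-description "group" whose
 * lowest-latency pio-latency-group reaches that cfg-handle.
 */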

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_sun4u(void)
{
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}

static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg(" MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg(" NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}

static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg(" trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg(" init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg(" free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg(" sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}
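
/* On return every online node has a bootmem allocator initialized
 * over its page range, with all lmb reserved regions (kernel image,
 * initrd, early allocations) re-reserved inside bootmem by
 * trim_reserved_in_node().
 */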

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}
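
/* Only naturally aligned, fully populated 256MB stretches get their
 * bit set above; leftover pieces at either end of a memory bank keep
 * using 4MB TTEs via kern_linear_pte_xor[0].
 */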

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}
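
/* The nop patched in above overrides the branch at kvmap_linear_patch
 * in the TLB miss handler, so linear-area misses fall through to the
 * full kernel page-table walk instead of the 4MB/256MB TTE fast path
 * (DEBUG_PAGEALLOC only).
 */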

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

extern void central_probe(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary.  Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	sstate_booting();

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	lmb_init();

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	lmb_enforce_memory_limit(cmdline_memory_size);

	lmb_analyze();
	lmb_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must setup the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");

	central_probe();
	cpu_probe();
}

int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
1962

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		/* Translate the KERNBASE-relative text address into its
		 * linear-mapping alias so virt_to_page() works on it.
		 */
		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif
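
/* Page protection encodings differ between sun4u and sun4v MMUs, so the
 * pgprot values and PTE attribute bits below are variables filled in at
 * boot by sun4u_pgprot_init() or sun4v_pgprot_init() rather than
 * compile-time constants.
 */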
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
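
/* With SPARSEMEM_VMEMMAP the virtual memmap area is backed by 4MB
 * mappings.  Each slot of vmemmap_table below holds the TTE for one
 * 4MB chunk of that area; the TLB miss handlers consult this table to
 * service faults on the memmap.
 */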
#ifdef CONFIG_SPARSEMEM_VMEMMAP

#define VMEMMAP_CHUNK_SHIFT	22
#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

/* Table length is the worst-case size of the memmap (maximum number of
 * physical pages times sizeof(struct page)) expressed in 4MB chunks.
 */
#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
			  sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];

int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
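
/* protection_map[] is indexed by the low four vm_flags bits:
 * VM_READ (0x1), VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8).
 * Entries 0x0-0x7 are the private (copy-on-write) variants, entries
 * 0x8-0xf the shared ones.
 */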
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
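
/* The kern_linear_pte_xor[] values are XOR'd with a linear-area virtual
 * address by the TSB miss handlers to form a TTE on the fly: the
 * 0xfffff80000000000 term cancels the PAGE_OFFSET bits, leaving the
 * physical address OR'd with the protection and size bits set up here.
 */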
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
			       _PAGE_CACHE_4U | _PAGE_P_4U |
			       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
			       _PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
				      _PAGE_CACHE_4U | _PAGE_P_4U |
				      __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				      _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID |
			       _PAGE_CACHE_4V | _PAGE_P_4V |
			       __ACCESS_BITS_4V | __DIRTY_BITS_4V |
			       _PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
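
/* Return the TTE size-field bits encoding the given page size in bytes
 * for the running MMU type; unsupported sizes fall back to 8K.
 */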
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
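
/* Build a PTE for an I/O mapping: the physical page with non-cached
 * protections, the bus space number in bits 32 and up, and the size
 * bits for page_size.
 */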
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
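
/* TTE bits for the 4MB mappings that cover the kernel image:
 * privileged, writable, executable, cacheable, and (on sun4u) locked.
 */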
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it.  Interrupts are disabled across the flush and
 * the original %pstate is restored at the end.
 */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}