/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
static int pavail_ents __devinitdata;

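/* sort() comparator used by read_obp_memory(): order memory banks by
 * ascending physical address.
 */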
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

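/* One bit per 4MB chunk of physical memory; a set bit means the chunk
 * is backed by real RAM.  Populated in mem_init() and consulted by the
 * kernel TLB miss path once patch_tlb_miss_handler_bitmap() has run.
 */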
unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

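/* Flush the local cpu's D-cache (and on spitfire the I-cache) lines
 * for @page.  sun4v never needs these flushes and must not get here,
 * hence the BUG_ON().
 */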
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

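/* Atomically mark @page dirty in the D-cache of @this_cpu: set the
 * dirty bit and record the cpu number in page->flags via a casx loop.
 */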
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

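/* Insert (tag, pte) into the kernel TSB entry @ent.  cheetah_plus and
 * sun4v reference their TSBs by physical address.
 */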
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

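/* If the page at @pfn is marked D-cache dirty, flush it on the cpu
 * that dirtied it (via cross-call when that is not the local cpu) and
 * clear the dirty state.
 */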
static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

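/* MM hook, called after a PTE is installed: preload the translation
 * into this address space's TSB (the huge-page TSB if the PTE maps a
 * huge page and one is set up) so the next access hits in the TSB.
 */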
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certain to get faulted
		 * into the tlb (and thus flushed) anyway.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

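/* sort() comparator: order OBP translation entries by ascending
 * virtual address.
 */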
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

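/* Ask the sun4v hypervisor to lock @vaddr -> @pte permanently into the
 * MMU selected by @mmu (HV_MMU_DMMU or HV_MMU_IMMU).  Failure this
 * early in boot is fatal.
 */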
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

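/* Lock the kernel image into the TLB: one locked 4MB entry per 4MB of
 * image, installed via the hypervisor on sun4v or via OBP on sun4u.
 */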
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) and never use a CTX
 * version of zero (otherwise NO_CONTEXT would not be caught
 * by the version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

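/* Walk [start, end) a page at a time and return the end of the leading
 * run that stays on a single NUMA node; *nid is set to that node (-1
 * if it cannot be determined).
 */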
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

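/* Map a PCI device node to a NUMA node by matching the device's
 * cfg-handle against the lowest-latency "pio-latency-group" reachable
 * from each machine-description "group" node.
 */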
int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx])\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = memblock_nid_range(start, end, &n);
		if (n == nid) {
			numadbg("  MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("  NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}

static void __init trim_reserved_in_node(int nid)
{
	struct memblock_region *reg;

	numadbg(" trim_reserved_in_node(%d)\n", nid);

	for_each_memblock(reserved, reg)
		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg(" init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg(" free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg(" sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

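/* On cheetah_plus and sun4v the kernel TSBs are accessed via physical
 * addresses, so rewrite the TSB load instructions in the trap handlers
 * accordingly (the quad-load variant differs between sun4u and sun4v).
 */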
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	pa >>= KTSB_PHYS_SHIFT;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	memblock_enforce_memory_limit(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
	}

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int __devinit page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

1886static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
1887static int pavail_rescan_ents __initdata;
1888
1889/* Certain OBP calls, such as fetching "available" properties, can
1890 * claim physical memory. So, along with initializing the valid
1891 * address bitmap, what we do here is refetch the physical available
1892 * memory list again, and make sure it provides at least as much
1893 * memory as 'pavail' does.
1894 */
d8ed1d43 1895static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
1da177e4 1896{
1da177e4
LT
1897 int i;
1898
13edad7a 1899 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
1da177e4 1900
13edad7a 1901 for (i = 0; i < pavail_ents; i++) {
1da177e4
LT
1902 unsigned long old_start, old_end;
1903
13edad7a 1904 old_start = pavail[i].phys_addr;
919ee677 1905 old_end = old_start + pavail[i].reg_size;
1da177e4
LT
1906 while (old_start < old_end) {
1907 int n;
1908
c2a5a46b 1909 for (n = 0; n < pavail_rescan_ents; n++) {
1da177e4
LT
1910 unsigned long new_start, new_end;
1911
13edad7a
DM
1912 new_start = pavail_rescan[n].phys_addr;
1913 new_end = new_start +
1914 pavail_rescan[n].reg_size;
1da177e4
LT
1915
1916 if (new_start <= old_start &&
1917 new_end >= (old_start + PAGE_SIZE)) {
d8ed1d43 1918 set_bit(old_start >> 22, bitmap);
1da177e4
LT
1919 goto do_next_page;
1920 }
1921 }
1922
1923 prom_printf("mem_init: Lost memory in pavail\n");
1924 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
1925 pavail[i].phys_addr,
1926 pavail[i].reg_size);
1927 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
1928 pavail_rescan[i].phys_addr,
1929 pavail_rescan[i].reg_size);
1930 prom_printf("mem_init: Cannot continue, aborting.\n");
1931 prom_halt();
1932
1933 do_next_page:
1934 old_start += PAGE_SIZE;
1935 }
1936 }
1937}
1938
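/* Patch the two-instruction valid-address-bitmap test into the TLB
 * miss handler.  The second word is written first, with a barrier in
 * between, so no CPU should ever execute a half-patched sequence;
 * flushi() then discards any stale copy from the instruction cache.
 */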
1939static void __init patch_tlb_miss_handler_bitmap(void)
1940{
1941 extern unsigned int valid_addr_bitmap_insn[];
1942 extern unsigned int valid_addr_bitmap_patch[];
1943
1944 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
1945 mb();
1946 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
1947 flushi(&valid_addr_bitmap_insn[0]);
1948}
1949
1950void __init mem_init(void)
1951{
1952 unsigned long codepages, datapages, initpages;
1953 unsigned long addr, last;
1954
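	/* Mark every 4MB chunk spanned by the kernel image as valid
	 * before folding in the firmware's "available" list.
	 */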
1955 addr = PAGE_OFFSET + kern_base;
1956 last = PAGE_ALIGN(kern_size) + addr;
1957 while (addr < last) {
1958 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
1959 addr += PAGE_SIZE;
1960 }
1961
1962 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
1963 patch_tlb_miss_handler_bitmap();
1da177e4 1964
1965 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1966
919ee677 1967#ifdef CONFIG_NEED_MULTIPLE_NODES
1968 {
1969 int i;
1970 for_each_online_node(i) {
1971 if (NODE_DATA(i)->node_spanned_pages != 0) {
1972 totalram_pages +=
1973 free_all_bootmem_node(NODE_DATA(i));
1974 }
1975 }
1976 }
1977#else
1978 totalram_pages = free_all_bootmem();
1979#endif
1980
1981 /* We subtract one to account for the mem_map_zero page
1982 * allocated below.
1983 */
1984 totalram_pages -= 1;
1985 num_physpages = totalram_pages;
1986
1987 /*
1988 * Set up the zero page, mark it reserved, so that page count
1989 * is not manipulated when freeing the page from user ptes.
1990 */
1991 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
1992 if (mem_map_zero == NULL) {
1993 prom_printf("paging_init: Cannot alloc zero page.\n");
1994 prom_halt();
1995 }
1996 SetPageReserved(mem_map_zero);
1997
1998 codepages = (((unsigned long) _etext) - ((unsigned long) _start));
1999 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
2000 datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
2001 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
2002 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
2003 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
2004
96177299 2005 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
2006 nr_free_pages() << (PAGE_SHIFT-10),
2007 codepages << (PAGE_SHIFT-10),
2008 datapages << (PAGE_SHIFT-10),
2009 initpages << (PAGE_SHIFT-10),
2010 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
2011
2012 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2013 cheetah_ecache_flush_init();
2014}
2015
898cf0ec 2016void free_initmem(void)
2017{
2018 unsigned long addr, initend;
2019 int do_free = 1;
2020
2021 /* If the physical memory maps were trimmed by kernel command
 2022 * line options, don't even try to free this initmem.  The
 2023 * kernel image could have been in the trimmed-out region, in
 2024 * which case the freeing below would free invalid page structs.
2025 */
2026 if (cmdline_memory_size)
2027 do_free = 0;
2028
2029 /*
 2030 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes.
2031 */
2032 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2033 initend = (unsigned long)(__init_end) & PAGE_MASK;
2034 for (; addr < initend; addr += PAGE_SIZE) {
2035 unsigned long page;
2036 struct page *p;
2037
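		/* 'addr' lives in the KERNBASE mapping of the kernel
		 * image; rebase it into the linear (PAGE_OFFSET) mapping
		 * so that virt_to_page() below finds the right page.
		 */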
2038 page = (addr +
2039 ((unsigned long) __va(kern_base)) -
2040 ((unsigned long) KERNBASE));
c9cf5528 2041 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1da177e4 2042
2043 if (do_free) {
2044 p = virt_to_page(page);
2045
2046 ClearPageReserved(p);
2047 init_page_count(p);
2048 __free_page(p);
2049 num_physpages++;
2050 totalram_pages++;
2051 }
2052 }
2053}
2054
2055#ifdef CONFIG_BLK_DEV_INITRD
2056void free_initrd_mem(unsigned long start, unsigned long end)
2057{
2058 if (start < end)
2059 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
2060 for (; start < end; start += PAGE_SIZE) {
2061 struct page *p = virt_to_page(start);
2062
2063 ClearPageReserved(p);
7835e98b 2064 init_page_count(p);
2065 __free_page(p);
2066 num_physpages++;
2067 totalram_pages++;
2068 }
2069}
2070#endif
c4bce90e 2071
2072#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2073#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2074#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2075#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2076#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2077#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2078
2079pgprot_t PAGE_KERNEL __read_mostly;
2080EXPORT_SYMBOL(PAGE_KERNEL);
2081
2082pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2083pgprot_t PAGE_COPY __read_mostly;
2084
2085pgprot_t PAGE_SHARED __read_mostly;
2086EXPORT_SYMBOL(PAGE_SHARED);
2087
2088unsigned long pg_iobits __read_mostly;
2089
2090unsigned long _PAGE_IE __read_mostly;
987c74fc 2091EXPORT_SYMBOL(_PAGE_IE);
b2bef442 2092
c4bce90e 2093unsigned long _PAGE_E __read_mostly;
2094EXPORT_SYMBOL(_PAGE_E);
2095
c4bce90e 2096unsigned long _PAGE_CACHE __read_mostly;
b2bef442 2097EXPORT_SYMBOL(_PAGE_CACHE);
c4bce90e 2098
46644c24 2099#ifdef CONFIG_SPARSEMEM_VMEMMAP
2100unsigned long vmemmap_table[VMEMMAP_SIZE];
2101
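/* Back the virtual memmap with 4MB mappings.  Each vmemmap_table slot
 * caches the TTE covering one VMEMMAP_CHUNK worth of page structs; a
 * 4MB backing block is allocated the first time a chunk in the
 * requested range is found invalid.
 */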
2102int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
2103{
2104 unsigned long vstart = (unsigned long) start;
2105 unsigned long vend = (unsigned long) (start + nr);
2106 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2107 unsigned long phys_end = (vend - VMEMMAP_BASE);
2108 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2109 unsigned long end = VMEMMAP_ALIGN(phys_end);
2110 unsigned long pte_base;
2111
2112 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2113 _PAGE_CP_4U | _PAGE_CV_4U |
2114 _PAGE_P_4U | _PAGE_W_4U);
2115 if (tlb_type == hypervisor)
2116 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2117 _PAGE_CP_4V | _PAGE_CV_4V |
2118 _PAGE_P_4V | _PAGE_W_4V);
2119
2120 for (; addr < end; addr += VMEMMAP_CHUNK) {
2121 unsigned long *vmem_pp =
2122 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2123 void *block;
2124
2125 if (!(*vmem_pp & _PAGE_VALID)) {
2126 block = vmemmap_alloc_block(1UL << 22, node);
2127 if (!block)
2128 return -ENOMEM;
2129
2130 *vmem_pp = pte_base | __pa(block);
2131
2132 printk(KERN_INFO "[%p-%p] page_structs=%lu "
2133 "node=%d entry=%lu/%lu\n", start, block, nr,
2134 node,
2135 addr >> VMEMMAP_CHUNK_SHIFT,
33cd9dfa 2136 VMEMMAP_SIZE);
2137 }
2138 }
2139 return 0;
2140}
2141#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2142
2143static void prot_init_common(unsigned long page_none,
2144 unsigned long page_shared,
2145 unsigned long page_copy,
2146 unsigned long page_readonly,
2147 unsigned long page_exec_bit)
2148{
2149 PAGE_COPY = __pgprot(page_copy);
0f15952a 2150 PAGE_SHARED = __pgprot(page_shared);
2151
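	/* protection_map is indexed by the vma protection bits: bit 0
	 * is VM_READ, bit 1 VM_WRITE, bit 2 VM_EXEC, and bit 3 selects
	 * shared (set) versus private copy-on-write (clear) mappings.
	 * E.g. index 0x3 (private read/write) maps to page_copy for
	 * COW, while 0xf (shared rwx) maps to page_shared.
	 */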
2152 protection_map[0x0] = __pgprot(page_none);
2153 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2154 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2155 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2156 protection_map[0x4] = __pgprot(page_readonly);
2157 protection_map[0x5] = __pgprot(page_readonly);
2158 protection_map[0x6] = __pgprot(page_copy);
2159 protection_map[0x7] = __pgprot(page_copy);
2160 protection_map[0x8] = __pgprot(page_none);
2161 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2162 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2163 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2164 protection_map[0xc] = __pgprot(page_readonly);
2165 protection_map[0xd] = __pgprot(page_readonly);
2166 protection_map[0xe] = __pgprot(page_shared);
2167 protection_map[0xf] = __pgprot(page_shared);
2168}
2169
2170static void __init sun4u_pgprot_init(void)
2171{
2172 unsigned long page_none, page_shared, page_copy, page_readonly;
2173 unsigned long page_exec_bit;
2174
2175 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2176 _PAGE_CACHE_4U | _PAGE_P_4U |
2177 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2178 _PAGE_EXEC_4U);
2179 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2180 _PAGE_CACHE_4U | _PAGE_P_4U |
2181 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2182 _PAGE_EXEC_4U | _PAGE_L_4U);
2183
2184 _PAGE_IE = _PAGE_IE_4U;
2185 _PAGE_E = _PAGE_E_4U;
2186 _PAGE_CACHE = _PAGE_CACHE_4U;
2187
2188 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2189 __ACCESS_BITS_4U | _PAGE_E_4U);
2190
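	/* The linear-mapping TLB miss path forms a TTE by XOR'ing the
	 * faulting virtual address with kern_linear_pte_xor: the
	 * constant below cancels the PAGE_OFFSET bits and supplies the
	 * page-size and protection bits in a single operation.
	 */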
2191#ifdef CONFIG_DEBUG_PAGEALLOC
2192 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
af1ee569 2193 0xfffff80000000000UL;
d1acb421 2194#else
9cc3a1ac 2195 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
af1ee569 2196 0xfffff80000000000UL;
d1acb421 2197#endif
2198 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2199 _PAGE_P_4U | _PAGE_W_4U);
2200
2201 /* XXX Should use 256MB on Panther. XXX */
2202 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2203
2204 _PAGE_SZBITS = _PAGE_SZBITS_4U;
2205 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2206 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2207 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2208
2209
2210 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2211 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2212 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2213 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2214 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2215 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2216 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2217
2218 page_exec_bit = _PAGE_EXEC_4U;
2219
2220 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2221 page_exec_bit);
2222}
2223
2224static void __init sun4v_pgprot_init(void)
2225{
2226 unsigned long page_none, page_shared, page_copy, page_readonly;
2227 unsigned long page_exec_bit;
2228
2229 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2230 _PAGE_CACHE_4V | _PAGE_P_4V |
2231 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2232 _PAGE_EXEC_4V);
2233 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2234
2235 _PAGE_IE = _PAGE_IE_4V;
2236 _PAGE_E = _PAGE_E_4V;
2237 _PAGE_CACHE = _PAGE_CACHE_4V;
2238
2239#ifdef CONFIG_DEBUG_PAGEALLOC
2240 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
af1ee569 2241 0xfffff80000000000UL;
d1acb421 2242#else
9cc3a1ac 2243 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
af1ee569 2244 0xfffff80000000000UL;
d1acb421 2245#endif
2246 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2247 _PAGE_P_4V | _PAGE_W_4V);
2248
2249#ifdef CONFIG_DEBUG_PAGEALLOC
2250 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
af1ee569 2251 0xfffff80000000000UL;
d1acb421 2252#else
9cc3a1ac 2253 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
af1ee569 2254 0xfffff80000000000UL;
d1acb421 2255#endif
2256 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2257 _PAGE_P_4V | _PAGE_W_4V);
2258
2259 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2260 __ACCESS_BITS_4V | _PAGE_E_4V);
2261
2262 _PAGE_SZBITS = _PAGE_SZBITS_4V;
2263 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2264 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2265 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2266 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2267
2268 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2269 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2270 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2271 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2272 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2273 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2274 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2275
2276 page_exec_bit = _PAGE_EXEC_4V;
2277
2278 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2279 page_exec_bit);
2280}
2281
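/* Translate a page size in bytes into the matching TTE size-field
 * encoding, using the sun4v layout under the hypervisor and the sun4u
 * layout otherwise; unrecognized sizes fall back to 8K.  For example,
 * pte_sz_bits(4 * 1024 * 1024) yields _PAGE_SZ4MB_4V on sun4v.
 */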
2282unsigned long pte_sz_bits(unsigned long sz)
2283{
2284 if (tlb_type == hypervisor) {
2285 switch (sz) {
2286 case 8 * 1024:
2287 default:
2288 return _PAGE_SZ8K_4V;
2289 case 64 * 1024:
2290 return _PAGE_SZ64K_4V;
2291 case 512 * 1024:
2292 return _PAGE_SZ512K_4V;
2293 case 4 * 1024 * 1024:
2294 return _PAGE_SZ4MB_4V;
6cb79b3f 2295 }
2296 } else {
2297 switch (sz) {
2298 case 8 * 1024:
2299 default:
2300 return _PAGE_SZ8K_4U;
2301 case 64 * 1024:
2302 return _PAGE_SZ64K_4U;
2303 case 512 * 1024:
2304 return _PAGE_SZ512K_4U;
2305 case 4 * 1024 * 1024:
2306 return _PAGE_SZ4MB_4U;
6cb79b3f 2307 }
2308 }
2309}
2310
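/* Build a PTE for an uncached I/O mapping: the bus space number is
 * folded into the high physical address bits (above bit 32) and the
 * TTE size field is taken from 'page_size'.
 */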
2311pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2312{
2313 pte_t pte;
2314
2315 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2316 pte_val(pte) |= (((unsigned long)space) << 32);
2317 pte_val(pte) |= pte_sz_bits(page_size);
c4bce90e 2318
cf627156 2319 return pte;
2320}
2321
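/* TTE bits for the 4MB kernel linear mappings.  The sun4u variant
 * sets the lock bit (_PAGE_L_4U) so the entries stay pinned in the
 * TLB; the sun4v encoding has no lock bit and omits it.
 */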
2322static unsigned long kern_large_tte(unsigned long paddr)
2323{
2324 unsigned long val;
2325
2326 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2327 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2328 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2329 if (tlb_type == hypervisor)
2330 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2331 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2332 _PAGE_EXEC_4V | _PAGE_W_4V);
2333
2334 return val | paddr;
2335}
2336
2337/* If not locked, zap it. */
2338void __flush_tlb_all(void)
2339{
2340 unsigned long pstate;
2341 int i;
2342
2343 __asm__ __volatile__("flushw\n\t"
2344 "rdpr %%pstate, %0\n\t"
2345 "wrpr %0, %1, %%pstate"
2346 : "=r" (pstate)
2347 : "i" (PSTATE_IE));
2348 if (tlb_type == hypervisor) {
2349 sun4v_mmu_demap_all();
2350 } else if (tlb_type == spitfire) {
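		/* Spitfire has 64 entries in each of the D-TLB and
		 * I-TLB; walk both and zap every entry that is not
		 * locked (_PAGE_L_4U).
		 */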
2351 for (i = 0; i < 64; i++) {
2352 /* Spitfire Errata #32 workaround */
2353 /* NOTE: Always runs on spitfire, so no
2354 * cheetah+ page size encodings.
2355 */
2356 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2357 "flush %%g6"
2358 : /* No outputs */
2359 : "r" (0),
2360 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2361
2362 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2363 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2364 "membar #Sync"
2365 : /* no outputs */
2366 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2367 spitfire_put_dtlb_data(i, 0x0UL);
2368 }
2369
2370 /* Spitfire Errata #32 workaround */
2371 /* NOTE: Always runs on spitfire, so no
2372 * cheetah+ page size encodings.
2373 */
2374 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2375 "flush %%g6"
2376 : /* No outputs */
2377 : "r" (0),
2378 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2379
2380 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2381 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2382 "membar #Sync"
2383 : /* no outputs */
2384 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2385 spitfire_put_itlb_data(i, 0x0UL);
2386 }
2387 }
2388 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2389 cheetah_flush_dtlb_all();
2390 cheetah_flush_itlb_all();
2391 }
2392 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2393 : : "r" (pstate));
2394}