/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
static int pavail_ents __devinitdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

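/* Read the OBP /memory property named by 'property' into 'regs':
 * page-align each bank, drop banks that become empty, and sort the
 * remainder by ascending physical address.  *num_ents receives the
 * final entry count.
 */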
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

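/* The two helpers below update the dirty-cpu field in page->flags
 * with a compare-and-swap loop so that no other flag bits are
 * disturbed.  set_dcache_dirty() records 'this_cpu' and sets
 * PG_dcache_dirty; clear_dcache_dirty_cpu() clears the dirty bit
 * only while 'cpu' is still the recorded owner.
 */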
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

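/* Insert a (tag, pte) pair into one kernel TSB entry.  On cheetah_plus
 * and sun4v the TSB is accessed by its physical address, hence the
 * __pa() conversion before calling the low-level helper.
 */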
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

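/* Called by the core VM after a PTE has been installed for 'address'.
 * On pre-hypervisor (sun4u) chips this is where a lazily-marked dirty
 * D-cache page finally gets flushed; in all cases the new translation
 * is preloaded into the mm's TSB (the huge-page TSB if the PTE
 * carries a huge page size).
 */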
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certain to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits.  */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

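/* Ask the hypervisor to install a permanent (locked) mapping of
 * 'vaddr' in the given MMU.  Failure is unrecoverable this early in
 * boot, so report via the firmware console and halt.
 */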
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

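/* Scan [start, end) a page at a time and return the end of the
 * longest prefix that lies in a single NUMA node; *nid receives that
 * node, or -1 if 'start' matches no node mask.
 */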
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

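/* Tag each memblock memory range with the NUMA node backing it,
 * splitting any range that spans more than one node.
 */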
static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

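/* Store the two-bit page-size code 'val' for 256MB region 'index' in
 * kpte_linear_bitmap.  With 64-bit longs each word holds 32 codes;
 * for example, region index 128 (physical address 32GB) lands in
 * word 128 / 32 = 4 at bit offset (128 % 32) * 2 = 0.
 */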
static void __init kpte_set_val(unsigned long index, unsigned long val)
{
	unsigned long *ptr = kpte_linear_bitmap;

	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
	ptr += (index / (BITS_PER_LONG / 2));

	*ptr |= val;
}

static const unsigned long kpte_shift_min = 28; /* 256MB */
static const unsigned long kpte_shift_max = 34; /* 16GB */
static const unsigned long kpte_shift_incr = 3;

static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
					   unsigned long shift)
{
	unsigned long size = (1UL << shift);
	unsigned long mask = (size - 1UL);
	unsigned long remains = end - start;
	unsigned long val;

	if (remains < size || (start & mask))
		return start;

	/* VAL maps:
	 *
	 *	shift 28 --> kern_linear_pte_xor index 1
	 *	shift 31 --> kern_linear_pte_xor index 2
	 *	shift 34 --> kern_linear_pte_xor index 3
	 */
	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;

	remains &= ~mask;
	if (shift != kpte_shift_max)
		remains = size;

	while (remains) {
		unsigned long index = start >> kpte_shift_min;

		kpte_set_val(index, val);

		start += 1UL << kpte_shift_min;
		remains -= 1UL << kpte_shift_min;
	}

	return start;
}

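/* Greedily mark [start, end) in the bitmap: try the largest page-size
 * shift first and fall back toward 256MB.  If no shift fits at the
 * current offset, round start up to the next 256MB boundary and
 * retry.
 */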
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	unsigned long smallest_size, smallest_mask;
	unsigned long s;

	smallest_size = (1UL << kpte_shift_min);
	smallest_mask = (smallest_size - 1UL);

	while (start < end) {
		unsigned long orig_start = start;

		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
			start = kpte_mark_using_shift(start, end, s);

			if (start != orig_start)
				break;
		}

		if (start == orig_start)
			start = (start + smallest_size) & ~smallest_mask;
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

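/* Patch each recorded two-instruction site so it materializes the
 * physical TSB address (pre-shifted by KTSB_PHYS_SHIFT): the first
 * instruction receives the upper 22 bits, the second the low 10 bits.
 */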
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	pa >>= KTSB_PHYS_SHIFT;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

490384e7 DM |
1602 | static void __init sun4v_ktsb_init(void) |
1603 | { | |
1604 | unsigned long ktsb_pa; | |
1605 | ||
d7744a09 | 1606 | /* First KTSB for PAGE_SIZE mappings. */ |
490384e7 DM |
1607 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); |
1608 | ||
1609 | switch (PAGE_SIZE) { | |
1610 | case 8 * 1024: | |
1611 | default: | |
1612 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; | |
1613 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; | |
1614 | break; | |
1615 | ||
1616 | case 64 * 1024: | |
1617 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; | |
1618 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; | |
1619 | break; | |
1620 | ||
1621 | case 512 * 1024: | |
1622 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; | |
1623 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; | |
1624 | break; | |
1625 | ||
1626 | case 4 * 1024 * 1024: | |
1627 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; | |
1628 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; | |
1629 | break; | |
6cb79b3f | 1630 | } |
490384e7 | 1631 | |
3f19a84e | 1632 | ktsb_descr[0].assoc = 1; |
490384e7 DM |
1633 | ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; |
1634 | ktsb_descr[0].ctx_idx = 0; | |
1635 | ktsb_descr[0].tsb_base = ktsb_pa; | |
1636 | ktsb_descr[0].resv = 0; | |
1637 | ||
d1acb421 | 1638 | #ifndef CONFIG_DEBUG_PAGEALLOC |
4f93d21d | 1639 | /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */ |
d7744a09 DM |
1640 | ktsb_pa = (kern_base + |
1641 | ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); | |
1642 | ||
1643 | ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; | |
1644 | ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | | |
1645 | HV_PGSZ_MASK_256MB); | |
4f93d21d DM |
1646 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA4) |
1647 | ktsb_descr[1].pgsz_mask |= HV_PGSZ_MASK_2GB; | |
d7744a09 DM |
1648 | ktsb_descr[1].assoc = 1; |
1649 | ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; | |
1650 | ktsb_descr[1].ctx_idx = 0; | |
1651 | ktsb_descr[1].tsb_base = ktsb_pa; | |
1652 | ktsb_descr[1].resv = 0; | |
d1acb421 | 1653 | #endif |
490384e7 DM |
1654 | } |
1655 | ||
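/* A standalone sketch of what sun4v_ktsb_init() builds and what
 * sun4v_ktsb_register() later hands to the hypervisor: one descriptor
 * per kernel TSB.  Field names mirror the code above, but the real
 * hv_tsb_descr layout lives in asm/hypervisor.h, so the widths and
 * ordering here are illustrative only.
 */
#include <stdio.h>

struct tsb_descr_sketch {
	unsigned short pgsz_idx;	/* index of the TSB's base page size */
	unsigned short assoc;		/* 1 == direct mapped */
	unsigned int   num_ttes;	/* TTE slots in the TSB */
	unsigned int   ctx_idx;		/* 0 == context register zero */
	unsigned int   pgsz_mask;	/* every page size this TSB serves */
	unsigned long  tsb_base;	/* physical address of the TSB */
	unsigned long  resv;
};

int main(void)
{
	struct tsb_descr_sketch d = {
		.pgsz_idx  = 0,			/* e.g. the 8K index */
		.assoc     = 1,
		.num_ttes  = 4096,		/* stand-in for KERNEL_TSB_NENTRIES */
		.ctx_idx   = 0,
		.pgsz_mask = 1u << 0,		/* e.g. the 8K mask bit */
		.tsb_base  = 0x40000000UL,	/* made-up physical address */
		.resv      = 0,
	};

	printf("TSB at pa %#lx with %u entries\n", d.tsb_base, d.num_ttes);
	return 0;
}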
1656 | void __cpuinit sun4v_ktsb_register(void) | |
1657 | { | |
7db35f31 | 1658 | unsigned long pa, ret; |
490384e7 DM |
1659 | |
1660 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); | |
1661 | ||
7db35f31 DM |
1662 | ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); |
1663 | if (ret != 0) { | |
1664 | prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " | |
1665 | "errors with %lx\n", pa, ret); | |
1666 | prom_halt(); | |
1667 | } | |
490384e7 DM |
1668 | } |
1669 | ||
1da177e4 LT |
1670 | /* paging_init() sets up the page tables */ |
1671 | ||
1da177e4 | 1672 | static unsigned long last_valid_pfn; |
56425306 | 1673 | pgd_t swapper_pg_dir[2048]; |
1da177e4 | 1674 | |
c4bce90e DM |
1675 | static void sun4u_pgprot_init(void); |
1676 | static void sun4v_pgprot_init(void); | |
1677 | ||
1da177e4 LT |
1678 | void __init paging_init(void) |
1679 | { | |
919ee677 | 1680 | unsigned long end_pfn, shift, phys_base; |
0836a0eb | 1681 | unsigned long real_end, i; |
aa6f0790 | 1682 | int node; |
0836a0eb | 1683 | |
22adb358 DM |
1684 | /* These build-time checks make sure that the dcache_dirty_cpu()
1685 | * page->flags usage will work. | |
1686 | * | |
1687 | * When a page gets marked as dcache-dirty, we store the | |
1688 | * cpu number starting at bit 32 in the page->flags. Also, | |
1689 | * functions like clear_dcache_dirty_cpu use the cpu mask | |
1690 | * in 13-bit signed-immediate instruction fields. | |
1691 | */ | |
9223b419 CL |
1692 | |
1693 | /* | |
1694 | * Page flags must not reach into upper 32 bits that are used | |
1695 | * for the cpu number | |
1696 | */ | |
1697 | BUILD_BUG_ON(NR_PAGEFLAGS > 32); | |
1698 | ||
1699 | /* | |
1700 | * The bit fields placed in the high range must not reach below | |
1701 | * the 32 bit boundary. Otherwise we cannot place the cpu field | |
1702 | * at the 32 bit boundary. | |
1703 | */ | |
22adb358 | 1704 | BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + |
9223b419 CL |
1705 | ilog2(roundup_pow_of_two(NR_CPUS)) > 32); |
1706 | ||
22adb358 DM |
1707 | BUILD_BUG_ON(NR_CPUS > 4096); |
1708 | ||
481295f9 DM |
1709 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
1710 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | |
1711 | ||
d7744a09 | 1712 | /* Invalidate both kernel TSBs. */ |
8b234274 | 1713 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); |
d1acb421 | 1714 | #ifndef CONFIG_DEBUG_PAGEALLOC |
d7744a09 | 1715 | memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); |
d1acb421 | 1716 | #endif |
8b234274 | 1717 | |
c4bce90e DM |
1718 | if (tlb_type == hypervisor) |
1719 | sun4v_pgprot_init(); | |
1720 | else | |
1721 | sun4u_pgprot_init(); | |
1722 | ||
d257d5da | 1723 | if (tlb_type == cheetah_plus || |
9076d0e7 | 1724 | tlb_type == hypervisor) { |
517af332 | 1725 | tsb_phys_patch(); |
9076d0e7 DM |
1726 | ktsb_phys_patch(); |
1727 | } | |
517af332 | 1728 | |
490384e7 | 1729 | if (tlb_type == hypervisor) { |
d257d5da | 1730 | sun4v_patch_tlb_handlers(); |
490384e7 DM |
1731 | sun4v_ktsb_init(); |
1732 | } | |
d257d5da | 1733 | |
a94a172d DM |
1734 | /* Find available physical memory... |
1735 | * | |
1736 | * Read it twice in order to work around a bug in openfirmware. | |
1737 | * The call to grab this table itself can cause openfirmware to | |
1738 | * allocate memory, which in turn can take away some space from | |
1739 | * the list of available memory. Reading it twice makes sure | |
1740 | * we really do get the final value. | |
1741 | */ | |
1742 | read_obp_translations(); | |
1743 | read_obp_memory("reg", &pall[0], &pall_ents); | |
1744 | read_obp_memory("available", &pavail[0], &pavail_ents); | |
13edad7a | 1745 | read_obp_memory("available", &pavail[0], &pavail_ents); |
0836a0eb DM |
1746 | |
1747 | phys_base = 0xffffffffffffffffUL; | |
3b2a7e23 | 1748 | for (i = 0; i < pavail_ents; i++) { |
13edad7a | 1749 | phys_base = min(phys_base, pavail[i].phys_addr); |
95f72d1e | 1750 | memblock_add(pavail[i].phys_addr, pavail[i].reg_size); |
3b2a7e23 DM |
1751 | } |
1752 | ||
95f72d1e | 1753 | memblock_reserve(kern_base, kern_size); |
0836a0eb | 1754 | |
4e82c9a6 DM |
1755 | find_ramdisk(phys_base); |
1756 | ||
95f72d1e | 1757 | memblock_enforce_memory_limit(cmdline_memory_size); |
25b0c659 | 1758 | |
1aadc056 | 1759 | memblock_allow_resize(); |
95f72d1e | 1760 | memblock_dump_all(); |
3b2a7e23 | 1761 | |
1da177e4 LT |
1762 | set_bit(0, mmu_context_bmap); |
1763 | ||
2bdb3cb2 DM |
1764 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); |
1765 | ||
1da177e4 | 1766 | real_end = (unsigned long)_end; |
64658743 DM |
1767 | num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); |
1768 | printk("Kernel: Using %d locked TLB entries for main kernel image.\n", | |
1769 | num_kernel_image_mappings); | |
2bdb3cb2 DM |
1770 | |
1771 | /* Set kernel pgd to upper alias so physical page computations | |
1da177e4 LT |
1772 | * work. |
1773 | */ | |
1774 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | |
1775 | ||
56425306 | 1776 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); |
1da177e4 LT |
1777 | |
1778 | /* Now can init the kernel/bad page tables. */ | |
1779 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | |
56425306 | 1780 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
1da177e4 | 1781 | |
c9c10830 | 1782 | inherit_prom_mappings(); |
5085b4a5 | 1783 | |
8f361453 DM |
1784 | init_kpte_bitmap(); |
1785 | ||
a8b900d8 DM |
1786 | /* Ok, we can use our TLB miss and window trap handlers safely. */ |
1787 | setup_tba(); | |
1da177e4 | 1788 | |
c9c10830 | 1789 | __flush_tlb_all(); |
9ad98c5b | 1790 | |
490384e7 DM |
1791 | if (tlb_type == hypervisor) |
1792 | sun4v_ktsb_register(); | |
1793 | ||
ad072004 | 1794 | prom_build_devicetree(); |
b696fdc2 | 1795 | of_populate_present_mask(); |
b99c6ebe DM |
1796 | #ifndef CONFIG_SMP |
1797 | of_fill_in_cpu_data(); | |
1798 | #endif | |
ad072004 | 1799 | |
890db403 | 1800 | if (tlb_type == hypervisor) { |
4a283339 | 1801 | sun4v_mdesc_init(); |
6ac5c610 | 1802 | mdesc_populate_present_mask(cpu_all_mask); |
b99c6ebe DM |
1803 | #ifndef CONFIG_SMP |
1804 | mdesc_fill_in_cpu_data(cpu_all_mask); | |
1805 | #endif | |
890db403 | 1806 | } |
4a283339 | 1807 | |
5ed56f1a DM |
1808 | /* Setup bootmem... */ |
1809 | last_valid_pfn = end_pfn = bootmem_init(phys_base); | |
1810 | ||
4f70f7a9 DM |
1811 | /* Once the OF device tree and MDESC have been set up, we know
1812 | * the list of possible cpus. Therefore we can allocate the | |
1813 | * IRQ stacks. | |
1814 | */ | |
1815 | for_each_possible_cpu(i) { | |
aa6f0790 | 1816 | node = cpu_to_node(i); |
5ed56f1a DM |
1817 | |
1818 | softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), | |
1819 | THREAD_SIZE, | |
1820 | THREAD_SIZE, 0); | |
1821 | hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), | |
1822 | THREAD_SIZE, | |
1823 | THREAD_SIZE, 0); | |
4f70f7a9 DM |
1824 | } |
1825 | ||
56425306 | 1826 | kernel_physical_mapping_init(); |
56425306 | 1827 | |
1da177e4 | 1828 | { |
919ee677 | 1829 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
1da177e4 | 1830 | |
919ee677 | 1831 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
1da177e4 | 1832 | |
919ee677 | 1833 | max_zone_pfns[ZONE_NORMAL] = end_pfn; |
1da177e4 | 1834 | |
919ee677 | 1835 | free_area_init_nodes(max_zone_pfns); |
1da177e4 LT |
1836 | } |
1837 | ||
3c62a2d3 | 1838 | printk("Booting Linux...\n"); |
1da177e4 LT |
1839 | } |
1840 | ||
9a2ed5cc | 1841 | int __devinit page_in_phys_avail(unsigned long paddr) |
919ee677 DM |
1842 | { |
1843 | int i; | |
1844 | ||
1845 | paddr &= PAGE_MASK; | |
1846 | ||
1847 | for (i = 0; i < pavail_ents; i++) { | |
1848 | unsigned long start, end; | |
1849 | ||
1850 | start = pavail[i].phys_addr; | |
1851 | end = start + pavail[i].reg_size; | |
1852 | ||
1853 | if (paddr >= start && paddr < end) | |
1854 | return 1; | |
1855 | } | |
1856 | if (paddr >= kern_base && paddr < (kern_base + kern_size)) | |
1857 | return 1; | |
1858 | #ifdef CONFIG_BLK_DEV_INITRD | |
1859 | if (paddr >= __pa(initrd_start) && | |
1860 | paddr < __pa(PAGE_ALIGN(initrd_end))) | |
1861 | return 1; | |
1862 | #endif | |
1863 | ||
1864 | return 0; | |
1865 | } | |
1866 | ||
1867 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | |
1868 | static int pavail_rescan_ents __initdata; | |
1869 | ||
1870 | /* Certain OBP calls, such as fetching "available" properties, can | |
1871 | * claim physical memory. So, along with initializing the valid | |
1872 | * address bitmap, what we do here is refetch the physical available | |
1873 | * memory list and make sure it provides at least as much | |
1874 | * memory as 'pavail' does. | |
1875 | */ | |
d8ed1d43 | 1876 | static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) |
1da177e4 | 1877 | { |
1da177e4 LT |
1878 | int i; |
1879 | ||
13edad7a | 1880 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); |
1da177e4 | 1881 | |
13edad7a | 1882 | for (i = 0; i < pavail_ents; i++) { |
1da177e4 LT |
1883 | unsigned long old_start, old_end; |
1884 | ||
13edad7a | 1885 | old_start = pavail[i].phys_addr; |
919ee677 | 1886 | old_end = old_start + pavail[i].reg_size; |
1da177e4 LT |
1887 | while (old_start < old_end) { |
1888 | int n; | |
1889 | ||
c2a5a46b | 1890 | for (n = 0; n < pavail_rescan_ents; n++) { |
1da177e4 LT |
1891 | unsigned long new_start, new_end; |
1892 | ||
13edad7a DM |
1893 | new_start = pavail_rescan[n].phys_addr; |
1894 | new_end = new_start + | |
1895 | pavail_rescan[n].reg_size; | |
1da177e4 LT |
1896 | |
1897 | if (new_start <= old_start && | |
1898 | new_end >= (old_start + PAGE_SIZE)) { | |
d8ed1d43 | 1899 | set_bit(old_start >> 22, bitmap); |
1da177e4 LT |
1900 | goto do_next_page; |
1901 | } | |
1902 | } | |
919ee677 DM |
1903 | |
1904 | prom_printf("mem_init: Lost memory in pavail\n"); | |
1905 | prom_printf("mem_init: OLD start[%lx] size[%lx]\n", | |
1906 | pavail[i].phys_addr, | |
1907 | pavail[i].reg_size); | |
1908 | prom_printf("mem_init: NEW start[%lx] size[%lx]\n", | |
1909 | pavail_rescan[i].phys_addr, | |
1910 | pavail_rescan[i].reg_size); | |
1911 | prom_printf("mem_init: Cannot continue, aborting.\n"); | |
1912 | prom_halt(); | |
1da177e4 LT |
1913 | |
1914 | do_next_page: | |
1915 | old_start += PAGE_SIZE; | |
1916 | } | |
1917 | } | |
1918 | } | |
1919 | ||
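/* A standalone sketch of the bitmap granularity used above: one bit
 * covers 4MB (1 << 22 bytes) of physical address space, so an address
 * selects bit (paddr >> 22).  The bitmap size here is made up.
 */
#include <stdio.h>

#define CHUNK_SHIFT_SK	 22		/* 4MB chunks, as in set_bit() above */
#define BITS_PER_LONG_SK (8 * sizeof(unsigned long))
#define NBITS_SK	 1024		/* covers 4GB in this sketch */

static unsigned long bitmap_sk[NBITS_SK / BITS_PER_LONG_SK];

static void mark_valid(unsigned long paddr)
{
	unsigned long bit = paddr >> CHUNK_SHIFT_SK;

	bitmap_sk[bit / BITS_PER_LONG_SK] |= 1UL << (bit % BITS_PER_LONG_SK);
}

static int is_valid(unsigned long paddr)
{
	unsigned long bit = paddr >> CHUNK_SHIFT_SK;

	return !!(bitmap_sk[bit / BITS_PER_LONG_SK] &
		  (1UL << (bit % BITS_PER_LONG_SK)));
}

int main(void)
{
	mark_valid(0x40000000UL);	/* marks the whole 4MB chunk */
	printf("%d %d %d\n",
	       is_valid(0x40000000UL),	/* 1: start of the chunk */
	       is_valid(0x403fffffUL),	/* 1: last byte of the chunk */
	       is_valid(0x40400000UL));	/* 0: next chunk */
	return 0;
}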
d8ed1d43 DM |
1920 | static void __init patch_tlb_miss_handler_bitmap(void) |
1921 | { | |
1922 | extern unsigned int valid_addr_bitmap_insn[]; | |
1923 | extern unsigned int valid_addr_bitmap_patch[]; | |
1924 | ||
1925 | valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1]; | |
1926 | mb(); | |
1927 | valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0]; | |
1928 | flushi(&valid_addr_bitmap_insn[0]); | |
1929 | } | |
1930 | ||
1da177e4 LT |
1931 | void __init mem_init(void) |
1932 | { | |
1933 | unsigned long codepages, datapages, initpages; | |
1934 | unsigned long addr, last; | |
1da177e4 LT |
1935 | |
1936 | addr = PAGE_OFFSET + kern_base; | |
1937 | last = PAGE_ALIGN(kern_size) + addr; | |
1938 | while (addr < last) { | |
1939 | set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); | |
1940 | addr += PAGE_SIZE; | |
1941 | } | |
1942 | ||
d8ed1d43 DM |
1943 | setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap); |
1944 | patch_tlb_miss_handler_bitmap(); | |
1da177e4 | 1945 | |
1da177e4 LT |
1946 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
1947 | ||
919ee677 | 1948 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
d8ed1d43 DM |
1949 | { |
1950 | int i; | |
1951 | for_each_online_node(i) { | |
1952 | if (NODE_DATA(i)->node_spanned_pages != 0) { | |
1953 | totalram_pages += | |
1954 | free_all_bootmem_node(NODE_DATA(i)); | |
1955 | } | |
919ee677 | 1956 | } |
625d693e | 1957 | totalram_pages += free_low_memory_core_early(MAX_NUMNODES); |
919ee677 DM |
1958 | } |
1959 | #else | |
1960 | totalram_pages = free_all_bootmem(); | |
1961 | #endif | |
1962 | ||
f1cfdb55 DM |
1963 | /* We subtract one to account for the mem_map_zero page |
1964 | * allocated below. | |
1965 | */ | |
919ee677 DM |
1966 | totalram_pages -= 1; |
1967 | num_physpages = totalram_pages; | |
1da177e4 LT |
1968 | |
1969 | /* | |
1970 | * Set up the zero page, mark it reserved, so that page count | |
1971 | * is not manipulated when freeing the page from user ptes. | |
1972 | */ | |
1973 | mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); | |
1974 | if (mem_map_zero == NULL) { | |
1975 | prom_printf("paging_init: Cannot alloc zero page.\n"); | |
1976 | prom_halt(); | |
1977 | } | |
1978 | SetPageReserved(mem_map_zero); | |
1979 | ||
1980 | codepages = (((unsigned long) _etext) - ((unsigned long) _start)); | |
1981 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | |
1982 | datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); | |
1983 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | |
1984 | initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); | |
1985 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | |
1986 | ||
96177299 | 1987 | printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", |
1da177e4 LT |
1988 | nr_free_pages() << (PAGE_SHIFT-10), |
1989 | codepages << (PAGE_SHIFT-10), | |
1990 | datapages << (PAGE_SHIFT-10), | |
1991 | initpages << (PAGE_SHIFT-10), | |
1992 | PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); | |
1993 | ||
1994 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
1995 | cheetah_ecache_flush_init(); | |
1996 | } | |
1997 | ||
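/* A standalone sketch of the bytes-to-pages accounting above: each
 * section's byte span is rounded up to a page boundary, then shifted
 * down by PAGE_SHIFT.  An 8K page (PAGE_SHIFT == 13) and the section
 * boundaries are assumed for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT_SK	13
#define PAGE_SIZE_SK	(1UL << PAGE_SHIFT_SK)

static unsigned long span_to_pages(unsigned long start, unsigned long end)
{
	unsigned long bytes = end - start;

	/* PAGE_ALIGN(bytes) >> PAGE_SHIFT */
	return ((bytes + PAGE_SIZE_SK - 1) & ~(PAGE_SIZE_SK - 1)) >>
		PAGE_SHIFT_SK;
}

int main(void)
{
	/* made-up stand-ins for _start/_etext */
	unsigned long text_start = 0x400000UL, text_end = 0x42a123UL;

	printf("codepages = %lu\n", span_to_pages(text_start, text_end));
	return 0;
}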
898cf0ec | 1998 | void free_initmem(void) |
1da177e4 LT |
1999 | { |
2000 | unsigned long addr, initend; | |
f2b60794 DM |
2001 | int do_free = 1; |
2002 | ||
2003 | /* If the physical memory maps were trimmed by kernel command | |
2004 | * line options, don't even try freeing this initmem stuff up. | |
2005 | * The kernel image could have been in the trimmed out region | |
2006 | * and if so the freeing below will free invalid page structs. | |
2007 | */ | |
2008 | if (cmdline_memory_size) | |
2009 | do_free = 0; | |
1da177e4 LT |
2010 | |
2011 | /* | |
2012 | * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes. | |
2013 | */ | |
2014 | addr = PAGE_ALIGN((unsigned long)(__init_begin)); | |
2015 | initend = (unsigned long)(__init_end) & PAGE_MASK; | |
2016 | for (; addr < initend; addr += PAGE_SIZE) { | |
2017 | unsigned long page; | |
2018 | struct page *p; | |
2019 | ||
2020 | page = (addr + | |
2021 | ((unsigned long) __va(kern_base)) - | |
2022 | ((unsigned long) KERNBASE)); | |
c9cf5528 | 2023 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
1da177e4 | 2024 | |
f2b60794 DM |
2025 | if (do_free) { |
2026 | p = virt_to_page(page); | |
2027 | ||
2028 | ClearPageReserved(p); | |
2029 | init_page_count(p); | |
2030 | __free_page(p); | |
2031 | num_physpages++; | |
2032 | totalram_pages++; | |
2033 | } | |
1da177e4 LT |
2034 | } |
2035 | } | |
2036 | ||
2037 | #ifdef CONFIG_BLK_DEV_INITRD | |
2038 | void free_initrd_mem(unsigned long start, unsigned long end) | |
2039 | { | |
2040 | if (start < end) | |
2041 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | |
2042 | for (; start < end; start += PAGE_SIZE) { | |
2043 | struct page *p = virt_to_page(start); | |
2044 | ||
2045 | ClearPageReserved(p); | |
7835e98b | 2046 | init_page_count(p); |
1da177e4 LT |
2047 | __free_page(p); |
2048 | num_physpages++; | |
2049 | totalram_pages++; | |
2050 | } | |
2051 | } | |
2052 | #endif | |
c4bce90e | 2053 | |
c4bce90e DM |
2054 | #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) |
2055 | #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) | |
2056 | #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) | |
2057 | #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) | |
2058 | #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) | |
2059 | #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) | |
2060 | ||
2061 | pgprot_t PAGE_KERNEL __read_mostly; | |
2062 | EXPORT_SYMBOL(PAGE_KERNEL); | |
2063 | ||
2064 | pgprot_t PAGE_KERNEL_LOCKED __read_mostly; | |
2065 | pgprot_t PAGE_COPY __read_mostly; | |
0f15952a DM |
2066 | |
2067 | pgprot_t PAGE_SHARED __read_mostly; | |
2068 | EXPORT_SYMBOL(PAGE_SHARED); | |
2069 | ||
c4bce90e DM |
2070 | unsigned long pg_iobits __read_mostly; |
2071 | ||
2072 | unsigned long _PAGE_IE __read_mostly; | |
987c74fc | 2073 | EXPORT_SYMBOL(_PAGE_IE); |
b2bef442 | 2074 | |
c4bce90e | 2075 | unsigned long _PAGE_E __read_mostly; |
b2bef442 DM |
2076 | EXPORT_SYMBOL(_PAGE_E); |
2077 | ||
c4bce90e | 2078 | unsigned long _PAGE_CACHE __read_mostly; |
b2bef442 | 2079 | EXPORT_SYMBOL(_PAGE_CACHE); |
c4bce90e | 2080 | |
46644c24 | 2081 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
46644c24 DM |
2082 | unsigned long vmemmap_table[VMEMMAP_SIZE]; |
2083 | ||
2856cc2e DM |
2084 | static long __meminitdata addr_start, addr_end; |
2085 | static int __meminitdata node_start; | |
2086 | ||
46644c24 DM |
2087 | int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) |
2088 | { | |
2089 | unsigned long vstart = (unsigned long) start; | |
2090 | unsigned long vend = (unsigned long) (start + nr); | |
2091 | unsigned long phys_start = (vstart - VMEMMAP_BASE); | |
2092 | unsigned long phys_end = (vend - VMEMMAP_BASE); | |
2093 | unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; | |
2094 | unsigned long end = VMEMMAP_ALIGN(phys_end); | |
2095 | unsigned long pte_base; | |
2096 | ||
2097 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | | |
2098 | _PAGE_CP_4U | _PAGE_CV_4U | | |
2099 | _PAGE_P_4U | _PAGE_W_4U); | |
2100 | if (tlb_type == hypervisor) | |
2101 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | | |
2102 | _PAGE_CP_4V | _PAGE_CV_4V | | |
2103 | _PAGE_P_4V | _PAGE_W_4V); | |
2104 | ||
2105 | for (; addr < end; addr += VMEMMAP_CHUNK) { | |
2106 | unsigned long *vmem_pp = | |
2107 | vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); | |
2108 | void *block; | |
2109 | ||
2110 | if (!(*vmem_pp & _PAGE_VALID)) { | |
2111 | block = vmemmap_alloc_block(1UL << 22, node); | |
2112 | if (!block) | |
2113 | return -ENOMEM; | |
2114 | ||
2115 | *vmem_pp = pte_base | __pa(block); | |
2116 | ||
2856cc2e DM |
2117 | /* check to see if we have contiguous blocks */ |
2118 | if (addr_end != addr || node_start != node) { | |
2119 | if (addr_start) | |
2120 | printk(KERN_DEBUG " [%lx-%lx] on node %d\n", | |
2121 | addr_start, addr_end-1, node_start); | |
2122 | addr_start = addr; | |
2123 | node_start = node; | |
2124 | } | |
2125 | addr_end = addr + VMEMMAP_CHUNK; | |
46644c24 DM |
2126 | } |
2127 | } | |
2128 | return 0; | |
2129 | } | |
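/* A standalone sketch of the one-level vmemmap scheme above: the
 * offset of a struct-page range into the virtual memmap region is
 * covered in 4MB chunks, and each chunk gets one TTE-like entry in a
 * flat table.  The constants and the "pte" encoding are illustrative.
 */
#include <stdio.h>

#define CHUNK_SHIFT_SK	22			/* 4MB chunks */
#define CHUNK_SK	(1UL << CHUNK_SHIFT_SK)
#define VALID_SK	(1UL << 63)		/* stand-in for _PAGE_VALID */

static unsigned long vmemmap_table_sk[64];	/* covers 256MB of memmap */

static void map_range(unsigned long off_start, unsigned long off_end)
{
	unsigned long addr = off_start & ~(CHUNK_SK - 1);

	for (; addr < off_end; addr += CHUNK_SK) {
		unsigned long *ent = &vmemmap_table_sk[addr >> CHUNK_SHIFT_SK];

		if (!(*ent & VALID_SK)) {
			/* vmemmap_alloc_block(1UL << 22, node) goes here */
			unsigned long block_pa = 0x80000000UL + addr;

			*ent = VALID_SK | block_pa;	/* pte_base | __pa(block) */
		}
	}
}

int main(void)
{
	int i;

	map_range(0x100000UL, 0x900000UL);	/* touches chunks 0, 1 and 2 */
	for (i = 0; i < 4; i++)
		printf("entry %d: %#lx\n", i, vmemmap_table_sk[i]);
	return 0;
}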
2856cc2e DM |
2130 | |
2131 | void __meminit vmemmap_populate_print_last(void) | |
2132 | { | |
2133 | if (addr_start) { | |
2134 | printk(KERN_DEBUG " [%lx-%lx] on node %d\n", | |
2135 | addr_start, addr_end-1, node_start); | |
2136 | addr_start = 0; | |
2137 | addr_end = 0; | |
2138 | node_start = 0; | |
2139 | } | |
2140 | } | |
46644c24 DM |
2141 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
2142 | ||
c4bce90e DM |
2143 | static void prot_init_common(unsigned long page_none, |
2144 | unsigned long page_shared, | |
2145 | unsigned long page_copy, | |
2146 | unsigned long page_readonly, | |
2147 | unsigned long page_exec_bit) | |
2148 | { | |
2149 | PAGE_COPY = __pgprot(page_copy); | |
0f15952a | 2150 | PAGE_SHARED = __pgprot(page_shared); |
c4bce90e DM |
2151 | |
2152 | protection_map[0x0] = __pgprot(page_none); | |
2153 | protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); | |
2154 | protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); | |
2155 | protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); | |
2156 | protection_map[0x4] = __pgprot(page_readonly); | |
2157 | protection_map[0x5] = __pgprot(page_readonly); | |
2158 | protection_map[0x6] = __pgprot(page_copy); | |
2159 | protection_map[0x7] = __pgprot(page_copy); | |
2160 | protection_map[0x8] = __pgprot(page_none); | |
2161 | protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); | |
2162 | protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); | |
2163 | protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); | |
2164 | protection_map[0xc] = __pgprot(page_readonly); | |
2165 | protection_map[0xd] = __pgprot(page_readonly); | |
2166 | protection_map[0xe] = __pgprot(page_shared); | |
2167 | protection_map[0xf] = __pgprot(page_shared); | |
2168 | } | |
2169 | ||
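/* A standalone sketch of how the 16-entry protection_map filled in
 * above is indexed: bit 0 = read, bit 1 = write, bit 2 = exec,
 * bit 3 = shared (the low vm_flags bits).  Private writable mappings
 * get the copy-on-write protections, shared writable mappings the
 * truly writable ones, and the exec bit is stripped whenever the
 * mapping lacks it.
 */
#include <stdio.h>

enum { R = 1, W = 2, X = 4, SHARED = 8 };

static const char *prot_name(unsigned int idx)
{
	if (!(idx & (R | W | X)))
		return "page_none";
	if (!(idx & W))
		return "page_readonly";
	return (idx & SHARED) ? "page_shared" : "page_copy (COW)";
}

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 16; idx++)
		printf("0x%x -> %s%s\n", idx, prot_name(idx),
		       (idx & (R | W | X)) && !(idx & X) ? " & ~exec" : "");
	return 0;
}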
2170 | static void __init sun4u_pgprot_init(void) | |
2171 | { | |
2172 | unsigned long page_none, page_shared, page_copy, page_readonly; | |
2173 | unsigned long page_exec_bit; | |
4f93d21d | 2174 | int i; |
c4bce90e DM |
2175 | |
2176 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | |
2177 | _PAGE_CACHE_4U | _PAGE_P_4U | | |
2178 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | |
2179 | _PAGE_EXEC_4U); | |
2180 | PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | |
2181 | _PAGE_CACHE_4U | _PAGE_P_4U | | |
2182 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | |
2183 | _PAGE_EXEC_4U | _PAGE_L_4U); | |
c4bce90e DM |
2184 | |
2185 | _PAGE_IE = _PAGE_IE_4U; | |
2186 | _PAGE_E = _PAGE_E_4U; | |
2187 | _PAGE_CACHE = _PAGE_CACHE_4U; | |
2188 | ||
2189 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | | |
2190 | __ACCESS_BITS_4U | _PAGE_E_4U); | |
2191 | ||
d1acb421 DM |
2192 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2193 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ | |
af1ee569 | 2194 | 0xfffff80000000000UL; |
d1acb421 | 2195 | #else |
9cc3a1ac | 2196 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ |
af1ee569 | 2197 | 0xfffff80000000000UL; |
d1acb421 | 2198 | #endif |
9cc3a1ac DM |
2199 | kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | |
2200 | _PAGE_P_4U | _PAGE_W_4U); | |
2201 | ||
2202 | /* XXX Should use 256MB on Panther. XXX */ | |
4f93d21d DM |
2203 | for (i = 1; i < 4; i++) |
2204 | kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; | |
c4bce90e DM |
2205 | |
2206 | _PAGE_SZBITS = _PAGE_SZBITS_4U; | |
2207 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | | |
2208 | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | | |
2209 | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); | |
2210 | ||
2211 | ||
2212 | page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; | |
2213 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | |
2214 | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); | |
2215 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | |
2216 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | |
2217 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | |
2218 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | |
2219 | ||
2220 | page_exec_bit = _PAGE_EXEC_4U; | |
2221 | ||
2222 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | |
2223 | page_exec_bit); | |
2224 | } | |
2225 | ||
2226 | static void __init sun4v_pgprot_init(void) | |
2227 | { | |
2228 | unsigned long page_none, page_shared, page_copy, page_readonly; | |
2229 | unsigned long page_exec_bit; | |
4f93d21d | 2230 | int i; |
c4bce90e DM |
2231 | |
2232 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | | |
2233 | _PAGE_CACHE_4V | _PAGE_P_4V | | |
2234 | __ACCESS_BITS_4V | __DIRTY_BITS_4V | | |
2235 | _PAGE_EXEC_4V); | |
2236 | PAGE_KERNEL_LOCKED = PAGE_KERNEL; | |
c4bce90e DM |
2237 | |
2238 | _PAGE_IE = _PAGE_IE_4V; | |
2239 | _PAGE_E = _PAGE_E_4V; | |
2240 | _PAGE_CACHE = _PAGE_CACHE_4V; | |
2241 | ||
d1acb421 DM |
2242 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2243 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | |
af1ee569 | 2244 | 0xfffff80000000000UL; |
d1acb421 | 2245 | #else |
9cc3a1ac | 2246 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ |
af1ee569 | 2247 | 0xfffff80000000000UL; |
d1acb421 | 2248 | #endif |
9cc3a1ac DM |
2249 | kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | |
2250 | _PAGE_P_4V | _PAGE_W_4V); | |
2251 | ||
d1acb421 DM |
2252 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2253 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | |
af1ee569 | 2254 | 0xfffff80000000000UL; |
d1acb421 | 2255 | #else |
9cc3a1ac | 2256 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ |
af1ee569 | 2257 | 0xfffff80000000000UL; |
d1acb421 | 2258 | #endif |
9cc3a1ac DM |
2259 | kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | |
2260 | _PAGE_P_4V | _PAGE_W_4V); | |
c4bce90e | 2261 | |
4f93d21d DM |
2262 | i = 2; |
2263 | ||
2264 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA4) { | |
2265 | #ifdef CONFIG_DEBUG_PAGEALLOC | |
2266 | kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | |
2267 | 0xfffff80000000000UL; | |
2268 | #else | |
2269 | kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ | |
2270 | 0xfffff80000000000UL; | |
2271 | #endif | |
2272 | kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | | |
2273 | _PAGE_P_4V | _PAGE_W_4V); | |
2274 | ||
2275 | i = 3; | |
2276 | } | |
2277 | ||
2278 | for (; i < 4; i++) | |
2279 | kern_linear_pte_xor[i] = kern_linear_pte_xor[i - 1]; | |
2280 | ||
c4bce90e DM |
2281 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | |
2282 | __ACCESS_BITS_4V | _PAGE_E_4V); | |
2283 | ||
2284 | _PAGE_SZBITS = _PAGE_SZBITS_4V; | |
2285 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | | |
2286 | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | | |
2287 | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | | |
2288 | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); | |
2289 | ||
2290 | page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; | |
2291 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | |
2292 | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); | |
2293 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | |
2294 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | |
2295 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | |
2296 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | |
2297 | ||
2298 | page_exec_bit = _PAGE_EXEC_4V; | |
2299 | ||
2300 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | |
2301 | page_exec_bit); | |
2302 | } | |
2303 | ||
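/* A standalone sketch of the XOR trick behind kern_linear_pte_xor[]:
 * with BASE the start of the kernel linear mapping (the literal
 * 0xfffff80000000000UL above) and FLAGS the TTE bits, the boot-time
 * value FLAGS ^ BASE turns a faulting linear address into a complete
 * TTE with a single XOR in the miss handler, because
 *
 *	(BASE | paddr) ^ (FLAGS ^ BASE) == FLAGS | paddr
 *
 * whenever paddr's bits are disjoint from BASE's.  FLAGS and paddr
 * below are made up.
 */
#include <assert.h>
#include <stdio.h>

#define BASE_SK		0xfffff80000000000UL	/* linear-map base */
#define FLAGS_SK	0x8000000000000086UL	/* made-up TTE flag bits */

int main(void)
{
	unsigned long pte_xor = FLAGS_SK ^ BASE_SK;	/* computed at boot */
	unsigned long paddr = 0x12345678000UL;		/* made-up */
	unsigned long vaddr = BASE_SK | paddr;		/* linear-map address */

	/* the single XOR performed per TLB miss */
	unsigned long tte = vaddr ^ pte_xor;

	assert(tte == (FLAGS_SK | paddr));
	printf("tte = %#lx\n", tte);
	return 0;
}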
2304 | unsigned long pte_sz_bits(unsigned long sz) | |
2305 | { | |
2306 | if (tlb_type == hypervisor) { | |
2307 | switch (sz) { | |
2308 | case 8 * 1024: | |
2309 | default: | |
2310 | return _PAGE_SZ8K_4V; | |
2311 | case 64 * 1024: | |
2312 | return _PAGE_SZ64K_4V; | |
2313 | case 512 * 1024: | |
2314 | return _PAGE_SZ512K_4V; | |
2315 | case 4 * 1024 * 1024: | |
2316 | return _PAGE_SZ4MB_4V; | |
6cb79b3f | 2317 | } |
c4bce90e DM |
2318 | } else { |
2319 | switch (sz) { | |
2320 | case 8 * 1024: | |
2321 | default: | |
2322 | return _PAGE_SZ8K_4U; | |
2323 | case 64 * 1024: | |
2324 | return _PAGE_SZ64K_4U; | |
2325 | case 512 * 1024: | |
2326 | return _PAGE_SZ512K_4U; | |
2327 | case 4 * 1024 * 1024: | |
2328 | return _PAGE_SZ4MB_4U; | |
6cb79b3f | 2329 | } |
c4bce90e DM |
2330 | } |
2331 | } | |
2332 | ||
2333 | pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) | |
2334 | { | |
2335 | pte_t pte; | |
cf627156 DM |
2336 | |
2337 | pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); | |
c4bce90e DM |
2338 | pte_val(pte) |= (((unsigned long)space) << 32); |
2339 | pte_val(pte) |= pte_sz_bits(page_size); | |
c4bce90e | 2340 | |
cf627156 | 2341 | return pte; |
c4bce90e DM |
2342 | } |
2343 | ||
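/* A standalone sketch of the composition done by mk_pte_io() above:
 * the physical page address, non-cacheable protection bits, the bus
 * space id shifted to bit 32, and the size bits chosen by
 * pte_sz_bits() are simply OR'd into one PTE value.  All of the bit
 * values below are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page   = 0x1f40000000UL;	/* made-up I/O physical page */
	unsigned long prot   = 0xe6UL;		/* made-up noncached prot bits */
	unsigned long space  = 0x7fUL;		/* made-up bus space id */
	unsigned long szbits = 1UL << 61;	/* stand-in for a _PAGE_SZ* bit */

	unsigned long pte = page | prot | (space << 32) | szbits;

	printf("io pte = %#lx\n", pte);
	return 0;
}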
2344 | static unsigned long kern_large_tte(unsigned long paddr) | |
2345 | { | |
2346 | unsigned long val; | |
2347 | ||
2348 | val = (_PAGE_VALID | _PAGE_SZ4MB_4U | | |
2349 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | | |
2350 | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); | |
2351 | if (tlb_type == hypervisor) | |
2352 | val = (_PAGE_VALID | _PAGE_SZ4MB_4V | | |
2353 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | | |
2354 | _PAGE_EXEC_4V | _PAGE_W_4V); | |
2355 | ||
2356 | return val | paddr; | |
2357 | } | |
2358 | ||
c4bce90e DM |
2359 | /* If not locked, zap it. */ |
2360 | void __flush_tlb_all(void) | |
2361 | { | |
2362 | unsigned long pstate; | |
2363 | int i; | |
2364 | ||
2365 | __asm__ __volatile__("flushw\n\t" | |
2366 | "rdpr %%pstate, %0\n\t" | |
2367 | "wrpr %0, %1, %%pstate" | |
2368 | : "=r" (pstate) | |
2369 | : "i" (PSTATE_IE)); | |
8f361453 DM |
2370 | if (tlb_type == hypervisor) { |
2371 | sun4v_mmu_demap_all(); | |
2372 | } else if (tlb_type == spitfire) { | |
c4bce90e DM |
2373 | for (i = 0; i < 64; i++) { |
2374 | /* Spitfire Errata #32 workaround */ | |
2375 | /* NOTE: Always runs on spitfire, so no | |
2376 | * cheetah+ page size encodings. | |
2377 | */ | |
2378 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
2379 | "flush %%g6" | |
2380 | : /* No outputs */ | |
2381 | : "r" (0), | |
2382 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
2383 | ||
2384 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { | |
2385 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
2386 | "membar #Sync" | |
2387 | : /* no outputs */ | |
2388 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
2389 | spitfire_put_dtlb_data(i, 0x0UL); | |
2390 | } | |
2391 | ||
2392 | /* Spitfire Errata #32 workaround */ | |
2393 | /* NOTE: Always runs on spitfire, so no | |
2394 | * cheetah+ page size encodings. | |
2395 | */ | |
2396 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
2397 | "flush %%g6" | |
2398 | : /* No outputs */ | |
2399 | : "r" (0), | |
2400 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
2401 | ||
2402 | if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { | |
2403 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
2404 | "membar #Sync" | |
2405 | : /* no outputs */ | |
2406 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
2407 | spitfire_put_itlb_data(i, 0x0UL); | |
2408 | } | |
2409 | } | |
2410 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
2411 | cheetah_flush_dtlb_all(); | |
2412 | cheetah_flush_itlb_all(); | |
2413 | } | |
2414 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | |
2415 | : : "r" (pstate)); | |
2416 | } |