Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $ |
2 | * arch/sparc64/mm/init.c | |
3 | * | |
4 | * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) | |
5 | * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | |
6 | */ | |
7 | ||
8 | #include <linux/config.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/sched.h> | |
11 | #include <linux/string.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/bootmem.h> | |
14 | #include <linux/mm.h> | |
15 | #include <linux/hugetlb.h> | |
16 | #include <linux/slab.h> | |
17 | #include <linux/initrd.h> | |
18 | #include <linux/swap.h> | |
19 | #include <linux/pagemap.h> | |
20 | #include <linux/fs.h> | |
21 | #include <linux/seq_file.h> | |
05e14cb3 | 22 | #include <linux/kprobes.h> |
1ac4f5eb | 23 | #include <linux/cache.h> |
13edad7a | 24 | #include <linux/sort.h> |
1da177e4 LT |
25 | |
26 | #include <asm/head.h> | |
27 | #include <asm/system.h> | |
28 | #include <asm/page.h> | |
29 | #include <asm/pgalloc.h> | |
30 | #include <asm/pgtable.h> | |
31 | #include <asm/oplib.h> | |
32 | #include <asm/iommu.h> | |
33 | #include <asm/io.h> | |
34 | #include <asm/uaccess.h> | |
35 | #include <asm/mmu_context.h> | |
36 | #include <asm/tlbflush.h> | |
37 | #include <asm/dma.h> | |
38 | #include <asm/starfire.h> | |
39 | #include <asm/tlb.h> | |
40 | #include <asm/spitfire.h> | |
41 | #include <asm/sections.h> | |
42 | ||
43 | extern void device_scan(void); | |
44 | ||
13edad7a DM |
45 | #define MAX_BANKS 32 |
46 | ||
47 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; | |
48 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | |
49 | static int pavail_ents __initdata; | |
50 | static int pavail_rescan_ents __initdata; | |
51 | ||
52 | static int cmp_p64(const void *a, const void *b) | |
53 | { | |
54 | const struct linux_prom64_registers *x = a, *y = b; | |
55 | ||
56 | if (x->phys_addr > y->phys_addr) | |
57 | return 1; | |
58 | if (x->phys_addr < y->phys_addr) | |
59 | return -1; | |
60 | return 0; | |
61 | } | |
62 | ||
63 | static void __init read_obp_memory(const char *property, | |
64 | struct linux_prom64_registers *regs, | |
65 | int *num_ents) | |
66 | { | |
67 | int node = prom_finddevice("/memory"); | |
68 | int prop_size = prom_getproplen(node, property); | |
69 | int ents, ret, i; | |
70 | ||
71 | ents = prop_size / sizeof(struct linux_prom64_registers); | |
72 | if (ents > MAX_BANKS) { | |
73 | prom_printf("The machine has more %s property entries than " | |
74 | "this kernel can support (%d).\n", | |
75 | property, MAX_BANKS); | |
76 | prom_halt(); | |
77 | } | |
78 | ||
79 | ret = prom_getproperty(node, property, (char *) regs, prop_size); | |
80 | if (ret == -1) { | |
81 | prom_printf("Couldn't get %s property from /memory.\n"); | |
82 | prom_halt(); | |
83 | } | |
84 | ||
85 | *num_ents = ents; | |
10147570 | 86 | |
13edad7a DM |
87 | /* Sanitize what we got from the firmware, by page aligning |
88 | * everything. | |
89 | */ | |
90 | for (i = 0; i < ents; i++) { | |
91 | unsigned long base, size; | |
92 | ||
93 | base = regs[i].phys_addr; | |
94 | size = regs[i].reg_size; | |
10147570 | 95 | |
13edad7a DM |
96 | size &= PAGE_MASK; |
97 | if (base & ~PAGE_MASK) { | |
98 | unsigned long new_base = PAGE_ALIGN(base); | |
99 | ||
100 | size -= new_base - base; | |
101 | if ((long) size < 0L) | |
102 | size = 0UL; | |
103 | base = new_base; | |
104 | } | |
105 | regs[i].phys_addr = base; | |
106 | regs[i].reg_size = size; | |
107 | } | |
c9c10830 | 108 | sort(regs, ents, sizeof(struct linux_prom64_registers), |
13edad7a DM |
109 | cmp_p64, NULL); |
110 | } | |
1da177e4 | 111 | |
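read_obp_memory() sanitizes each bank by trimming any partial page at its head and tail before sorting. A minimal stand-alone sketch of that alignment math, using hypothetical bank values and assuming the 8K sparc64 base page:

```c
/* Sketch only: the same head/tail trimming as the loop above,
 * with made-up bank values and PAGE_SHIFT = 13 (8K pages).
 */
#include <stdio.h>

#define PAGE_SHIFT 13
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long base = 0x40000123UL;	/* misaligned bank start */
	unsigned long size = 0x00800fffUL;	/* odd bank size */

	size &= PAGE_MASK;			/* drop the partial tail page */
	if (base & ~PAGE_MASK) {		/* drop the partial head page */
		unsigned long new_base = PAGE_ALIGN(base);

		size -= new_base - base;
		if ((long) size < 0L)
			size = 0UL;
		base = new_base;
	}
	printf("base=%#lx size=%#lx\n", base, size);	/* 0x40002000, 0x7fe123 */
	return 0;
}
```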
2bdb3cb2 | 112 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; |
1da177e4 LT |
113 | |
114 | /* Ugly, but necessary... -DaveM */ | |
1ac4f5eb DM |
115 | unsigned long phys_base __read_mostly; |
116 | unsigned long kern_base __read_mostly; | |
117 | unsigned long kern_size __read_mostly; | |
118 | unsigned long pfn_base __read_mostly; | |
1da177e4 | 119 | |
1da177e4 LT |
120 | /* get_new_mmu_context() uses "cache + 1". */ |
121 | DEFINE_SPINLOCK(ctx_alloc_lock); | |
122 | unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; | |
123 | #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6)) | |
124 | unsigned long mmu_context_bmap[CTX_BMAP_SLOTS]; | |
125 | ||
126 | /* References to special section boundaries */ | |
127 | extern char _start[], _end[]; | |
128 | ||
129 | /* Initial ramdisk setup */ | |
130 | extern unsigned long sparc_ramdisk_image64; | |
131 | extern unsigned int sparc_ramdisk_image; | |
132 | extern unsigned int sparc_ramdisk_size; | |
133 | ||
1ac4f5eb | 134 | struct page *mem_map_zero __read_mostly; |
1da177e4 | 135 | |
0835ae0f DM |
136 | unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; |
137 | ||
138 | unsigned long sparc64_kern_pri_context __read_mostly; | |
139 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; | |
140 | unsigned long sparc64_kern_sec_context __read_mostly; | |
141 | ||
1da177e4 LT |
142 | int bigkernel = 0; |
143 | ||
144 | /* XXX Tune this... */ | |
145 | #define PGT_CACHE_LOW 25 | |
146 | #define PGT_CACHE_HIGH 50 | |
147 | ||
148 | void check_pgt_cache(void) | |
149 | { | |
150 | preempt_disable(); | |
151 | if (pgtable_cache_size > PGT_CACHE_HIGH) { | |
152 | do { | |
153 | if (pgd_quicklist) | |
154 | free_pgd_slow(get_pgd_fast()); | |
155 | if (pte_quicklist[0]) | |
156 | free_pte_slow(pte_alloc_one_fast(NULL, 0)); | |
157 | if (pte_quicklist[1]) | |
158 | free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); | |
159 | } while (pgtable_cache_size > PGT_CACHE_LOW); | |
160 | } | |
161 | preempt_enable(); | |
162 | } | |
163 | ||
164 | #ifdef CONFIG_DEBUG_DCFLUSH | |
165 | atomic_t dcpage_flushes = ATOMIC_INIT(0); | |
166 | #ifdef CONFIG_SMP | |
167 | atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); | |
168 | #endif | |
169 | #endif | |
170 | ||
171 | __inline__ void flush_dcache_page_impl(struct page *page) | |
172 | { | |
173 | #ifdef CONFIG_DEBUG_DCFLUSH | |
174 | atomic_inc(&dcpage_flushes); | |
175 | #endif | |
176 | ||
177 | #ifdef DCACHE_ALIASING_POSSIBLE | |
178 | __flush_dcache_page(page_address(page), | |
179 | ((tlb_type == spitfire) && | |
180 | page_mapping(page) != NULL)); | |
181 | #else | |
182 | if (page_mapping(page) != NULL && | |
183 | tlb_type == spitfire) | |
184 | __flush_icache_page(__pa(page_address(page))); | |
185 | #endif | |
186 | } | |
187 | ||
188 | #define PG_dcache_dirty PG_arch_1 | |
48b0e548 DM |
189 | #define PG_dcache_cpu_shift 24 |
190 | #define PG_dcache_cpu_mask (256 - 1) | |
191 | ||
192 | #if NR_CPUS > 256 | |
193 | #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus | |
194 | #endif | |
1da177e4 LT |
195 | |
196 | #define dcache_dirty_cpu(page) \ | |
48b0e548 | 197 | (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) |
1da177e4 LT |
198 | |
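The two casx loops below keep a (dirty bit, owner cpu) pair packed into page->flags. Stripped of the atomicity, the packing works out as in this stand-alone sketch (the dirty-bit position is a stand-in; the real bit is PG_arch_1):

```c
#include <stdio.h>

#define PG_dcache_dirty      30	/* stand-in bit number, not PG_arch_1 */
#define PG_dcache_cpu_shift  24
#define PG_dcache_cpu_mask   (256 - 1)

/* Non-atomic equivalent of set_dcache_dirty() below. */
static unsigned long set_dirty(unsigned long flags, int cpu)
{
	flags &= ~((unsigned long) PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	flags |= ((unsigned long) cpu << PG_dcache_cpu_shift);
	flags |= 1UL << PG_dcache_dirty;
	return flags;
}

int main(void)
{
	unsigned long flags = set_dirty(0UL, 5);

	printf("dirty=%lu cpu=%lu\n",
	       (flags >> PG_dcache_dirty) & 1UL,
	       (flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask);
	return 0;
}
```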
199 | static __inline__ void set_dcache_dirty(struct page *page, int this_cpu) | |
200 | { | |
201 | unsigned long mask = this_cpu; | |
48b0e548 DM |
202 | unsigned long non_cpu_bits; |
203 | ||
204 | non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); | |
205 | mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); | |
206 | ||
1da177e4 LT |
207 | __asm__ __volatile__("1:\n\t" |
208 | "ldx [%2], %%g7\n\t" | |
209 | "and %%g7, %1, %%g1\n\t" | |
210 | "or %%g1, %0, %%g1\n\t" | |
211 | "casx [%2], %%g7, %%g1\n\t" | |
212 | "cmp %%g7, %%g1\n\t" | |
b445e26c | 213 | "membar #StoreLoad | #StoreStore\n\t" |
1da177e4 | 214 | "bne,pn %%xcc, 1b\n\t" |
b445e26c | 215 | " nop" |
1da177e4 LT |
216 | : /* no outputs */ |
217 | : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) | |
218 | : "g1", "g7"); | |
219 | } | |
220 | ||
221 | static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) | |
222 | { | |
223 | unsigned long mask = (1UL << PG_dcache_dirty); | |
224 | ||
225 | __asm__ __volatile__("! test_and_clear_dcache_dirty\n" | |
226 | "1:\n\t" | |
227 | "ldx [%2], %%g7\n\t" | |
48b0e548 | 228 | "srlx %%g7, %4, %%g1\n\t" |
1da177e4 LT |
229 | "and %%g1, %3, %%g1\n\t" |
230 | "cmp %%g1, %0\n\t" | |
231 | "bne,pn %%icc, 2f\n\t" | |
232 | " andn %%g7, %1, %%g1\n\t" | |
233 | "casx [%2], %%g7, %%g1\n\t" | |
234 | "cmp %%g7, %%g1\n\t" | |
b445e26c | 235 | "membar #StoreLoad | #StoreStore\n\t" |
1da177e4 | 236 | "bne,pn %%xcc, 1b\n\t" |
b445e26c | 237 | " nop\n" |
1da177e4 LT |
238 | "2:" |
239 | : /* no outputs */ | |
240 | : "r" (cpu), "r" (mask), "r" (&page->flags), | |
48b0e548 DM |
241 | "i" (PG_dcache_cpu_mask), |
242 | "i" (PG_dcache_cpu_shift) | |
1da177e4 LT |
243 | : "g1", "g7"); |
244 | } | |
245 | ||
1da177e4 LT |
246 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
247 | { | |
248 | struct page *page; | |
249 | unsigned long pfn; | |
250 | unsigned long pg_flags; | |
251 | ||
252 | pfn = pte_pfn(pte); | |
253 | if (pfn_valid(pfn) && | |
254 | (page = pfn_to_page(pfn), page_mapping(page)) && | |
255 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | |
48b0e548 DM |
256 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & |
257 | PG_dcache_cpu_mask); | |
1da177e4 LT |
258 | int this_cpu = get_cpu(); |
259 | ||
260 | /* This is just to optimize away some function calls | |
261 | * in the SMP case. | |
262 | */ | |
263 | if (cpu == this_cpu) | |
264 | flush_dcache_page_impl(page); | |
265 | else | |
266 | smp_flush_dcache_page_impl(page, cpu); | |
267 | ||
268 | clear_dcache_dirty_cpu(page, cpu); | |
269 | ||
270 | put_cpu(); | |
271 | } | |
1da177e4 LT |
272 | } |
273 | ||
274 | void flush_dcache_page(struct page *page) | |
275 | { | |
a9546f59 DM |
276 | struct address_space *mapping; |
277 | int this_cpu; | |
1da177e4 | 278 | |
a9546f59 DM |
279 | /* Do not bother with the expensive D-cache flush if it |
280 | * is merely the zero page. The 'bigcore' testcase in GDB | |
281 | * causes this case to run millions of times. | |
282 | */ | |
283 | if (page == ZERO_PAGE(0)) | |
284 | return; | |
285 | ||
286 | this_cpu = get_cpu(); | |
287 | ||
288 | mapping = page_mapping(page); | |
1da177e4 | 289 | if (mapping && !mapping_mapped(mapping)) { |
a9546f59 | 290 | int dirty = test_bit(PG_dcache_dirty, &page->flags); |
1da177e4 | 291 | if (dirty) { |
a9546f59 DM |
292 | int dirty_cpu = dcache_dirty_cpu(page); |
293 | ||
1da177e4 LT |
294 | if (dirty_cpu == this_cpu) |
295 | goto out; | |
296 | smp_flush_dcache_page_impl(page, dirty_cpu); | |
297 | } | |
298 | set_dcache_dirty(page, this_cpu); | |
299 | } else { | |
300 | /* We could delay the flush for the !page_mapping | |
301 | * case too. But that case is for exec env/arg | |
302 | * pages and those are 99% certain to get | |
303 | * faulted into the tlb (and thus flushed) anyway. | |
304 | */ | |
305 | flush_dcache_page_impl(page); | |
306 | } | |
307 | ||
308 | out: | |
309 | put_cpu(); | |
310 | } | |
311 | ||
05e14cb3 | 312 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) |
1da177e4 LT |
313 | { |
314 | /* Cheetah has coherent I-cache. */ | |
315 | if (tlb_type == spitfire) { | |
316 | unsigned long kaddr; | |
317 | ||
318 | for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) | |
319 | __flush_icache_page(__get_phys(kaddr)); | |
320 | } | |
321 | } | |
322 | ||
323 | unsigned long page_to_pfn(struct page *page) | |
324 | { | |
325 | return (unsigned long) ((page - mem_map) + pfn_base); | |
326 | } | |
327 | ||
328 | struct page *pfn_to_page(unsigned long pfn) | |
329 | { | |
330 | return (mem_map + (pfn - pfn_base)); | |
331 | } | |
332 | ||
333 | void show_mem(void) | |
334 | { | |
335 | printk("Mem-info:\n"); | |
336 | show_free_areas(); | |
337 | printk("Free swap: %6ldkB\n", | |
338 | nr_swap_pages << (PAGE_SHIFT-10)); | |
339 | printk("%ld pages of RAM\n", num_physpages); | |
340 | printk("%d free pages\n", nr_free_pages()); | |
341 | printk("%d pages in page table cache\n",pgtable_cache_size); | |
342 | } | |
343 | ||
344 | void mmu_info(struct seq_file *m) | |
345 | { | |
346 | if (tlb_type == cheetah) | |
347 | seq_printf(m, "MMU Type\t: Cheetah\n"); | |
348 | else if (tlb_type == cheetah_plus) | |
349 | seq_printf(m, "MMU Type\t: Cheetah+\n"); | |
350 | else if (tlb_type == spitfire) | |
351 | seq_printf(m, "MMU Type\t: Spitfire\n"); | |
352 | else | |
353 | seq_printf(m, "MMU Type\t: ???\n"); | |
354 | ||
355 | #ifdef CONFIG_DEBUG_DCFLUSH | |
356 | seq_printf(m, "DCPageFlushes\t: %d\n", | |
357 | atomic_read(&dcpage_flushes)); | |
358 | #ifdef CONFIG_SMP | |
359 | seq_printf(m, "DCPageFlushesXC\t: %d\n", | |
360 | atomic_read(&dcpage_flushes_xcall)); | |
361 | #endif /* CONFIG_SMP */ | |
362 | #endif /* CONFIG_DEBUG_DCFLUSH */ | |
363 | } | |
364 | ||
365 | struct linux_prom_translation { | |
366 | unsigned long virt; | |
367 | unsigned long size; | |
368 | unsigned long data; | |
369 | }; | |
c9c10830 DM |
370 | |
371 | /* Exported for kernel TLB miss handling in ktlb.S */ | |
372 | struct linux_prom_translation prom_trans[512] __read_mostly; | |
373 | unsigned int prom_trans_ents __read_mostly; | |
374 | unsigned int swapper_pgd_zero __read_mostly; | |
1da177e4 LT |
375 | |
376 | extern unsigned long prom_boot_page; | |
377 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | |
378 | extern int prom_get_mmu_ihandle(void); | |
379 | extern void register_prom_callbacks(void); | |
380 | ||
381 | /* Exported for SMP bootup purposes. */ | |
382 | unsigned long kern_locked_tte_data; | |
383 | ||
1da177e4 LT |
384 | /* |
385 | * Translate PROM's mapping we capture at boot time into physical address. | |
386 | * The second parameter is only set from prom_callback() invocations. | |
387 | */ | |
388 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | |
389 | { | |
c9c10830 | 390 | int i; |
405599bd | 391 | |
c9c10830 DM |
392 | for (i = 0; i < prom_trans_ents; i++) { |
393 | struct linux_prom_translation *p = &prom_trans[i]; | |
405599bd | 394 | |
c9c10830 DM |
395 | if (promva >= p->virt && |
396 | promva < (p->virt + p->size)) { | |
397 | unsigned long base = p->data & _PAGE_PADDR; | |
5085b4a5 | 398 | |
c9c10830 DM |
399 | if (error) |
400 | *error = 0; | |
401 | return base + (promva & (8192 - 1)); | |
405599bd | 402 | } |
405599bd | 403 | } |
c9c10830 DM |
404 | if (error) |
405 | *error = 1; | |
406 | return 0UL; | |
405599bd DM |
407 | } |
408 | ||
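prom_trans[] is kept sorted by virtual address (see cmp_ptrans() and the sort() call below), so the linear scan in prom_virt_to_phys() could equally be a binary search. A sketch of that alternative, assuming the ranges do not overlap; this is not the code the kernel actually uses:

```c
/* Sketch only: binary search over the sorted prom_trans[] table,
 * giving the same answer as the linear scan above.
 */
static unsigned long prom_virt_to_phys_bsearch(unsigned long promva)
{
	int lo = 0, hi = (int) prom_trans_ents - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		struct linux_prom_translation *p = &prom_trans[mid];

		if (promva < p->virt)
			hi = mid - 1;
		else if (promva >= p->virt + p->size)
			lo = mid + 1;
		else
			return (p->data & _PAGE_PADDR) +
				(promva & (8192 - 1));
	}
	return 0UL;
}
```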
c9c10830 DM |
409 | /* The obp translations are saved based on 8k pagesize, since obp can |
410 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | |
411 | * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte | |
412 | * scheme (also, see rant in inherit_locked_prom_mappings()). | |
413 | */ | |
5085b4a5 DM |
414 | static inline int in_obp_range(unsigned long vaddr) |
415 | { | |
416 | return (vaddr >= LOW_OBP_ADDRESS && | |
417 | vaddr < HI_OBP_ADDRESS); | |
418 | } | |
419 | ||
c9c10830 | 420 | static int cmp_ptrans(const void *a, const void *b) |
405599bd | 421 | { |
c9c10830 | 422 | const struct linux_prom_translation *x = a, *y = b; |
405599bd | 423 | |
c9c10830 DM |
424 | if (x->virt > y->virt) |
425 | return 1; | |
426 | if (x->virt < y->virt) | |
427 | return -1; | |
428 | return 0; | |
405599bd DM |
429 | } |
430 | ||
c9c10830 | 431 | /* Read OBP translations property into 'prom_trans[]'. */ |
9ad98c5b | 432 | static void __init read_obp_translations(void) |
405599bd | 433 | { |
c9c10830 | 434 | int n, node, ents, first, last, i; |
1da177e4 LT |
435 | |
436 | node = prom_finddevice("/virtual-memory"); | |
437 | n = prom_getproplen(node, "translations"); | |
405599bd | 438 | if (unlikely(n == 0 || n == -1)) { |
b206fc4c | 439 | prom_printf("prom_mappings: Couldn't get size.\n"); |
1da177e4 LT |
440 | prom_halt(); |
441 | } | |
405599bd DM |
442 | if (unlikely(n > sizeof(prom_trans))) { |
443 | prom_printf("prom_mappings: Size %Zd is too big.\n", n); | |
1da177e4 LT |
444 | prom_halt(); |
445 | } | |
405599bd | 446 | |
b206fc4c | 447 | if ((n = prom_getproperty(node, "translations", |
405599bd DM |
448 | (char *)&prom_trans[0], |
449 | sizeof(prom_trans))) == -1) { | |
b206fc4c | 450 | prom_printf("prom_mappings: Couldn't get property.\n"); |
1da177e4 LT |
451 | prom_halt(); |
452 | } | |
9ad98c5b | 453 | |
b206fc4c | 454 | n = n / sizeof(struct linux_prom_translation); |
9ad98c5b | 455 | |
c9c10830 DM |
456 | ents = n; |
457 | ||
458 | sort(prom_trans, ents, sizeof(struct linux_prom_translation), | |
459 | cmp_ptrans, NULL); | |
460 | ||
461 | /* Now kick out all the non-OBP entries. */ | |
462 | for (i = 0; i < ents; i++) { | |
463 | if (in_obp_range(prom_trans[i].virt)) | |
464 | break; | |
465 | } | |
466 | first = i; | |
467 | for (; i < ents; i++) { | |
468 | if (!in_obp_range(prom_trans[i].virt)) | |
469 | break; | |
470 | } | |
471 | last = i; | |
472 | ||
473 | for (i = 0; i < (last - first); i++) { | |
474 | struct linux_prom_translation *src = &prom_trans[i + first]; | |
475 | struct linux_prom_translation *dest = &prom_trans[i]; | |
476 | ||
477 | *dest = *src; | |
478 | } | |
479 | for (; i < ents; i++) { | |
480 | struct linux_prom_translation *dest = &prom_trans[i]; | |
481 | dest->virt = dest->size = dest->data = 0x0UL; | |
482 | } | |
483 | ||
484 | prom_trans_ents = last - first; | |
485 | ||
486 | if (tlb_type == spitfire) { | |
487 | /* Clear diag TTE bits. */ | |
488 | for (i = 0; i < prom_trans_ents; i++) | |
489 | prom_trans[i].data &= ~0x0003fe0000000000UL; | |
490 | } | |
405599bd | 491 | } |
1da177e4 | 492 | |
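After sorting, the entries read_obp_translations() wants to keep (those inside the OBP range) form one contiguous run [first, last); the loops above slide that run to the front and zero the tail. A stand-alone sketch of the same idiom, with hypothetical bounds standing in for LOW_OBP_ADDRESS and HI_OBP_ADDRESS:

```c
#include <stdio.h>

#define LOW 0xf0000000UL	/* stand-in for LOW_OBP_ADDRESS */
#define HI  0xf1000000UL	/* stand-in for HI_OBP_ADDRESS */

int main(void)
{
	unsigned long v[6] = { 0x1000, 0x2000, 0xf0000000, 0xf0004000,
			       0xf0008000, 0xfffff000 };
	int ents = 6, first, last, i;

	for (i = 0; i < ents; i++)		/* find start of the run */
		if (v[i] >= LOW && v[i] < HI)
			break;
	first = i;
	for (; i < ents; i++)			/* find end of the run */
		if (!(v[i] >= LOW && v[i] < HI))
			break;
	last = i;

	for (i = 0; i < last - first; i++)	/* slide run to the front */
		v[i] = v[i + first];
	for (; i < ents; i++)			/* zero the tail */
		v[i] = 0;

	printf("kept %d entries\n", last - first);	/* kept 3 entries */
	return 0;
}
```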
898cf0ec | 493 | static void __init remap_kernel(void) |
405599bd DM |
494 | { |
495 | unsigned long phys_page, tte_vaddr, tte_data; | |
405599bd DM |
496 | int tlb_ent = sparc64_highest_locked_tlbent(); |
497 | ||
1da177e4 | 498 | tte_vaddr = (unsigned long) KERNBASE; |
bff06d55 DM |
499 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
500 | tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | | |
501 | _PAGE_CP | _PAGE_CV | _PAGE_P | | |
502 | _PAGE_L | _PAGE_W)); | |
1da177e4 LT |
503 | |
504 | kern_locked_tte_data = tte_data; | |
505 | ||
bff06d55 | 506 | /* Now lock us into the TLBs via OBP. */ |
405599bd DM |
507 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); |
508 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | |
1da177e4 | 509 | if (bigkernel) { |
0835ae0f DM |
510 | tlb_ent -= 1; |
511 | prom_dtlb_load(tlb_ent, | |
405599bd DM |
512 | tte_data + 0x400000, |
513 | tte_vaddr + 0x400000); | |
0835ae0f | 514 | prom_itlb_load(tlb_ent, |
405599bd DM |
515 | tte_data + 0x400000, |
516 | tte_vaddr + 0x400000); | |
1da177e4 | 517 | } |
0835ae0f DM |
518 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; |
519 | if (tlb_type == cheetah_plus) { | |
520 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | |
521 | CTX_CHEETAH_PLUS_NUC); | |
522 | sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; | |
523 | sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; | |
524 | } | |
405599bd | 525 | } |
1da177e4 | 526 | |
405599bd | 527 | |
c9c10830 | 528 | static void __init inherit_prom_mappings(void) |
9ad98c5b DM |
529 | { |
530 | read_obp_translations(); | |
405599bd DM |
531 | |
532 | /* Now fixup OBP's idea about where we really are mapped. */ | |
533 | prom_printf("Remapping the kernel... "); | |
534 | remap_kernel(); | |
1da177e4 LT |
535 | prom_printf("done.\n"); |
536 | ||
c9c10830 | 537 | prom_printf("Registering callbacks... "); |
1da177e4 | 538 | register_prom_callbacks(); |
c9c10830 | 539 | prom_printf("done.\n"); |
1da177e4 LT |
540 | } |
541 | ||
542 | /* The OBP specifications for sun4u mark 0xfffffffc00000000 and | |
543 | * upwards as reserved for use by the firmware (I wonder if this | |
544 | * will be the same on Cheetah...). We use this virtual address | |
545 | * range for the VPTE table mappings of the nucleus so we need | |
546 | * to zap them when we enter the PROM. -DaveM | |
547 | */ | |
548 | static void __flush_nucleus_vptes(void) | |
549 | { | |
550 | unsigned long prom_reserved_base = 0xfffffffc00000000UL; | |
551 | int i; | |
552 | ||
553 | /* Only DTLB must be checked for VPTE entries. */ | |
554 | if (tlb_type == spitfire) { | |
555 | for (i = 0; i < 63; i++) { | |
556 | unsigned long tag; | |
557 | ||
558 | /* Spitfire Errata #32 workaround */ | |
559 | /* NOTE: Always runs on spitfire, so no cheetah+ | |
560 | * page size encodings. | |
561 | */ | |
562 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
563 | "flush %%g6" | |
564 | : /* No outputs */ | |
565 | : "r" (0), | |
566 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
567 | ||
568 | tag = spitfire_get_dtlb_tag(i); | |
569 | if (((tag & ~(PAGE_MASK)) == 0) && | |
570 | ((tag & (PAGE_MASK)) >= prom_reserved_base)) { | |
571 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
572 | "membar #Sync" | |
573 | : /* no outputs */ | |
574 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
575 | spitfire_put_dtlb_data(i, 0x0UL); | |
576 | } | |
577 | } | |
578 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
579 | for (i = 0; i < 512; i++) { | |
580 | unsigned long tag = cheetah_get_dtlb_tag(i, 2); | |
581 | ||
582 | if ((tag & ~PAGE_MASK) == 0 && | |
583 | (tag & PAGE_MASK) >= prom_reserved_base) { | |
584 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
585 | "membar #Sync" | |
586 | : /* no outputs */ | |
587 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
588 | cheetah_put_dtlb_data(i, 0x0UL, 2); | |
589 | } | |
590 | ||
591 | if (tlb_type != cheetah_plus) | |
592 | continue; | |
593 | ||
594 | tag = cheetah_get_dtlb_tag(i, 3); | |
595 | ||
596 | if ((tag & ~PAGE_MASK) == 0 && | |
597 | (tag & PAGE_MASK) >= prom_reserved_base) { | |
598 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
599 | "membar #Sync" | |
600 | : /* no outputs */ | |
601 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
602 | cheetah_put_dtlb_data(i, 0x0UL, 3); | |
603 | } | |
604 | } | |
605 | } else { | |
606 | /* Implement me :-) */ | |
607 | BUG(); | |
608 | } | |
609 | } | |
610 | ||
611 | static int prom_ditlb_set; | |
612 | struct prom_tlb_entry { | |
613 | int tlb_ent; | |
614 | unsigned long tlb_tag; | |
615 | unsigned long tlb_data; | |
616 | }; | |
617 | struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; | |
618 | ||
619 | void prom_world(int enter) | |
620 | { | |
621 | unsigned long pstate; | |
622 | int i; | |
623 | ||
624 | if (!enter) | |
625 | set_fs((mm_segment_t) { get_thread_current_ds() }); | |
626 | ||
627 | if (!prom_ditlb_set) | |
628 | return; | |
629 | ||
630 | /* Make sure the following runs atomically. */ | |
631 | __asm__ __volatile__("flushw\n\t" | |
632 | "rdpr %%pstate, %0\n\t" | |
633 | "wrpr %0, %1, %%pstate" | |
634 | : "=r" (pstate) | |
635 | : "i" (PSTATE_IE)); | |
636 | ||
637 | if (enter) { | |
638 | /* Kick out nucleus VPTEs. */ | |
639 | __flush_nucleus_vptes(); | |
640 | ||
641 | /* Install PROM world. */ | |
642 | for (i = 0; i < 16; i++) { | |
643 | if (prom_dtlb[i].tlb_ent != -1) { | |
644 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
645 | "membar #Sync" | |
646 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | |
647 | "i" (ASI_DMMU)); | |
648 | if (tlb_type == spitfire) | |
649 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | |
650 | prom_dtlb[i].tlb_data); | |
651 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
652 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | |
653 | prom_dtlb[i].tlb_data); | |
654 | } | |
655 | if (prom_itlb[i].tlb_ent != -1) { | |
656 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
657 | "membar #Sync" | |
658 | : : "r" (prom_itlb[i].tlb_tag), | |
659 | "r" (TLB_TAG_ACCESS), | |
660 | "i" (ASI_IMMU)); | |
661 | if (tlb_type == spitfire) | |
662 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | |
663 | prom_itlb[i].tlb_data); | |
664 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
665 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | |
666 | prom_itlb[i].tlb_data); | |
667 | } | |
668 | } | |
669 | } else { | |
670 | for (i = 0; i < 16; i++) { | |
671 | if (prom_dtlb[i].tlb_ent != -1) { | |
672 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
673 | "membar #Sync" | |
674 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
675 | if (tlb_type == spitfire) | |
676 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | |
677 | else | |
678 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | |
679 | } | |
680 | if (prom_itlb[i].tlb_ent != -1) { | |
681 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
682 | "membar #Sync" | |
683 | : : "r" (TLB_TAG_ACCESS), | |
684 | "i" (ASI_IMMU)); | |
685 | if (tlb_type == spitfire) | |
686 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); | |
687 | else | |
688 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); | |
689 | } | |
690 | } | |
691 | } | |
692 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | |
693 | : : "r" (pstate)); | |
694 | } | |
695 | ||
696 | void inherit_locked_prom_mappings(int save_p) | |
697 | { | |
698 | int i; | |
699 | int dtlb_seen = 0; | |
700 | int itlb_seen = 0; | |
701 | ||
702 | /* Fucking losing PROM has more mappings in the TLB, but | |
703 | * it (conveniently) fails to mention any of these in the | |
704 | * translations property. The only ones that matter are | |
705 | * the locked PROM tlb entries, so we impose the following | |
706 | * irrecoverable rule on the PROM: it is allowed 8 locked | |
707 | * entries in the ITLB and 8 in the DTLB. | |
708 | * | |
709 | * Supposedly the upper 16GB of the address space is | |
710 | * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED | |
711 | * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface | |
712 | * used between the client program and the firmware on sun5 | |
713 | * systems to coordinate mmu mappings is also COMPLETELY | |
714 | * UNDOCUMENTED!!!!!! Thanks S(t)un! | |
715 | */ | |
716 | if (save_p) { | |
717 | for (i = 0; i < 16; i++) { | |
718 | prom_itlb[i].tlb_ent = -1; | |
719 | prom_dtlb[i].tlb_ent = -1; | |
720 | } | |
721 | } | |
722 | if (tlb_type == spitfire) { | |
0835ae0f DM |
723 | int high = sparc64_highest_unlocked_tlb_ent; |
724 | for (i = 0; i <= high; i++) { | |
1da177e4 LT |
725 | unsigned long data; |
726 | ||
727 | /* Spitfire Errata #32 workaround */ | |
728 | /* NOTE: Always runs on spitfire, so no cheetah+ | |
729 | * page size encodings. | |
730 | */ | |
731 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
732 | "flush %%g6" | |
733 | : /* No outputs */ | |
734 | : "r" (0), | |
735 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
736 | ||
737 | data = spitfire_get_dtlb_data(i); | |
738 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
739 | unsigned long tag; | |
740 | ||
741 | /* Spitfire Errata #32 workaround */ | |
742 | /* NOTE: Always runs on spitfire, so no | |
743 | * cheetah+ page size encodings. | |
744 | */ | |
745 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
746 | "flush %%g6" | |
747 | : /* No outputs */ | |
748 | : "r" (0), | |
749 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
750 | ||
751 | tag = spitfire_get_dtlb_tag(i); | |
752 | if (save_p) { | |
753 | prom_dtlb[dtlb_seen].tlb_ent = i; | |
754 | prom_dtlb[dtlb_seen].tlb_tag = tag; | |
755 | prom_dtlb[dtlb_seen].tlb_data = data; | |
756 | } | |
757 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
758 | "membar #Sync" | |
759 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
760 | spitfire_put_dtlb_data(i, 0x0UL); | |
761 | ||
762 | dtlb_seen++; | |
763 | if (dtlb_seen > 15) | |
764 | break; | |
765 | } | |
766 | } | |
767 | ||
768 | for (i = 0; i < high; i++) { | |
769 | unsigned long data; | |
770 | ||
771 | /* Spitfire Errata #32 workaround */ | |
772 | /* NOTE: Always runs on spitfire, so no | |
773 | * cheetah+ page size encodings. | |
774 | */ | |
775 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
776 | "flush %%g6" | |
777 | : /* No outputs */ | |
778 | : "r" (0), | |
779 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
780 | ||
781 | data = spitfire_get_itlb_data(i); | |
782 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
783 | unsigned long tag; | |
784 | ||
785 | /* Spitfire Errata #32 workaround */ | |
786 | /* NOTE: Always runs on spitfire, so no | |
787 | * cheetah+ page size encodings. | |
788 | */ | |
789 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
790 | "flush %%g6" | |
791 | : /* No outputs */ | |
792 | : "r" (0), | |
793 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
794 | ||
795 | tag = spitfire_get_itlb_tag(i); | |
796 | if (save_p) { | |
797 | prom_itlb[itlb_seen].tlb_ent = i; | |
798 | prom_itlb[itlb_seen].tlb_tag = tag; | |
799 | prom_itlb[itlb_seen].tlb_data = data; | |
800 | } | |
801 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
802 | "membar #Sync" | |
803 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
804 | spitfire_put_itlb_data(i, 0x0UL); | |
805 | ||
806 | itlb_seen++; | |
807 | if (itlb_seen > 15) | |
808 | break; | |
809 | } | |
810 | } | |
811 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
0835ae0f | 812 | int high = sparc64_highest_unlocked_tlb_ent; |
1da177e4 | 813 | |
0835ae0f | 814 | for (i = 0; i <= high; i++) { |
1da177e4 LT |
815 | unsigned long data; |
816 | ||
817 | data = cheetah_get_ldtlb_data(i); | |
818 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
819 | unsigned long tag; | |
820 | ||
821 | tag = cheetah_get_ldtlb_tag(i); | |
822 | if (save_p) { | |
823 | prom_dtlb[dtlb_seen].tlb_ent = i; | |
824 | prom_dtlb[dtlb_seen].tlb_tag = tag; | |
825 | prom_dtlb[dtlb_seen].tlb_data = data; | |
826 | } | |
827 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
828 | "membar #Sync" | |
829 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
830 | cheetah_put_ldtlb_data(i, 0x0UL); | |
831 | ||
832 | dtlb_seen++; | |
833 | if (dtlb_seen > 15) | |
834 | break; | |
835 | } | |
836 | } | |
837 | ||
838 | for (i = 0; i < high; i++) { | |
839 | unsigned long data; | |
840 | ||
841 | data = cheetah_get_litlb_data(i); | |
842 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
843 | unsigned long tag; | |
844 | ||
845 | tag = cheetah_get_litlb_tag(i); | |
846 | if (save_p) { | |
847 | prom_itlb[itlb_seen].tlb_ent = i; | |
848 | prom_itlb[itlb_seen].tlb_tag = tag; | |
849 | prom_itlb[itlb_seen].tlb_data = data; | |
850 | } | |
851 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
852 | "membar #Sync" | |
853 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
854 | cheetah_put_litlb_data(i, 0x0UL); | |
855 | ||
856 | itlb_seen++; | |
857 | if (itlb_seen > 15) | |
858 | break; | |
859 | } | |
860 | } | |
861 | } else { | |
862 | /* Implement me :-) */ | |
863 | BUG(); | |
864 | } | |
865 | if (save_p) | |
866 | prom_ditlb_set = 1; | |
867 | } | |
868 | ||
869 | /* Give PROM back his world, done during reboots... */ | |
870 | void prom_reload_locked(void) | |
871 | { | |
872 | int i; | |
873 | ||
874 | for (i = 0; i < 16; i++) { | |
875 | if (prom_dtlb[i].tlb_ent != -1) { | |
876 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
877 | "membar #Sync" | |
878 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | |
879 | "i" (ASI_DMMU)); | |
880 | if (tlb_type == spitfire) | |
881 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | |
882 | prom_dtlb[i].tlb_data); | |
883 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
884 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | |
885 | prom_dtlb[i].tlb_data); | |
886 | } | |
887 | ||
888 | if (prom_itlb[i].tlb_ent != -1) { | |
889 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
890 | "membar #Sync" | |
891 | : : "r" (prom_itlb[i].tlb_tag), | |
892 | "r" (TLB_TAG_ACCESS), | |
893 | "i" (ASI_IMMU)); | |
894 | if (tlb_type == spitfire) | |
895 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | |
896 | prom_itlb[i].tlb_data); | |
897 | else | |
898 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | |
899 | prom_itlb[i].tlb_data); | |
900 | } | |
901 | } | |
902 | } | |
903 | ||
904 | #ifdef DCACHE_ALIASING_POSSIBLE | |
905 | void __flush_dcache_range(unsigned long start, unsigned long end) | |
906 | { | |
907 | unsigned long va; | |
908 | ||
909 | if (tlb_type == spitfire) { | |
910 | int n = 0; | |
911 | ||
912 | for (va = start; va < end; va += 32) { | |
913 | spitfire_put_dcache_tag(va & 0x3fe0, 0x0); | |
914 | if (++n >= 512) | |
915 | break; | |
916 | } | |
917 | } else { | |
918 | start = __pa(start); | |
919 | end = __pa(end); | |
920 | for (va = start; va < end; va += 32) | |
921 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
922 | "membar #Sync" | |
923 | : /* no outputs */ | |
924 | : "r" (va), | |
925 | "i" (ASI_DCACHE_INVALIDATE)); | |
926 | } | |
927 | } | |
928 | #endif /* DCACHE_ALIASING_POSSIBLE */ | |
929 | ||
930 | /* If not locked, zap it. */ | |
931 | void __flush_tlb_all(void) | |
932 | { | |
933 | unsigned long pstate; | |
934 | int i; | |
935 | ||
936 | __asm__ __volatile__("flushw\n\t" | |
937 | "rdpr %%pstate, %0\n\t" | |
938 | "wrpr %0, %1, %%pstate" | |
939 | : "=r" (pstate) | |
940 | : "i" (PSTATE_IE)); | |
941 | if (tlb_type == spitfire) { | |
942 | for (i = 0; i < 64; i++) { | |
943 | /* Spitfire Errata #32 workaround */ | |
944 | /* NOTE: Always runs on spitfire, so no | |
945 | * cheetah+ page size encodings. | |
946 | */ | |
947 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
948 | "flush %%g6" | |
949 | : /* No outputs */ | |
950 | : "r" (0), | |
951 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
952 | ||
953 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { | |
954 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
955 | "membar #Sync" | |
956 | : /* no outputs */ | |
957 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
958 | spitfire_put_dtlb_data(i, 0x0UL); | |
959 | } | |
960 | ||
961 | /* Spitfire Errata #32 workaround */ | |
962 | /* NOTE: Always runs on spitfire, so no | |
963 | * cheetah+ page size encodings. | |
964 | */ | |
965 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
966 | "flush %%g6" | |
967 | : /* No outputs */ | |
968 | : "r" (0), | |
969 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
970 | ||
971 | if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { | |
972 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
973 | "membar #Sync" | |
974 | : /* no outputs */ | |
975 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
976 | spitfire_put_itlb_data(i, 0x0UL); | |
977 | } | |
978 | } | |
979 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
980 | cheetah_flush_dtlb_all(); | |
981 | cheetah_flush_itlb_all(); | |
982 | } | |
983 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | |
984 | : : "r" (pstate)); | |
985 | } | |
986 | ||
987 | /* Caller does TLB context flushing on local CPU if necessary. | |
988 | * The caller also ensures that CTX_VALID(mm->context) is false. | |
989 | * | |
990 | * We must be careful about boundary cases so that we never | |
991 | * let the user have CTX 0 (nucleus) and never use a CTX | |
992 | * version of zero (otherwise NO_CONTEXT would not be caught | |
993 | * by version mis-match tests in mmu_context.h). | |
994 | */ | |
995 | void get_new_mmu_context(struct mm_struct *mm) | |
996 | { | |
997 | unsigned long ctx, new_ctx; | |
998 | unsigned long orig_pgsz_bits; | |
999 | ||
1000 | ||
1001 | spin_lock(&ctx_alloc_lock); | |
1002 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); | |
1003 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; | |
1004 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); | |
1005 | if (new_ctx >= (1 << CTX_NR_BITS)) { | |
1006 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); | |
1007 | if (new_ctx >= ctx) { | |
1008 | int i; | |
1009 | new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + | |
1010 | CTX_FIRST_VERSION; | |
1011 | if (new_ctx == 1) | |
1012 | new_ctx = CTX_FIRST_VERSION; | |
1013 | ||
1014 | /* Don't call memset, for 16 entries that's just | |
1015 | * plain silly... | |
1016 | */ | |
1017 | mmu_context_bmap[0] = 3; | |
1018 | mmu_context_bmap[1] = 0; | |
1019 | mmu_context_bmap[2] = 0; | |
1020 | mmu_context_bmap[3] = 0; | |
1021 | for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { | |
1022 | mmu_context_bmap[i + 0] = 0; | |
1023 | mmu_context_bmap[i + 1] = 0; | |
1024 | mmu_context_bmap[i + 2] = 0; | |
1025 | mmu_context_bmap[i + 3] = 0; | |
1026 | } | |
1027 | goto out; | |
1028 | } | |
1029 | } | |
1030 | mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); | |
1031 | new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); | |
1032 | out: | |
1033 | tlb_context_cache = new_ctx; | |
1034 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; | |
1035 | spin_unlock(&ctx_alloc_lock); | |
1036 | } | |
1037 | ||
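A context value packs an allocation version above CTX_NR_BITS and a context number below it; when the number space wraps, get_new_mmu_context() bumps the version, invalidating every previously issued context in one step. A stand-alone sketch of that split (CTX_NR_BITS = 13 is an illustrative value, not taken from this file):

```c
#include <stdio.h>

#define CTX_NR_BITS       13	/* illustrative value only */
#define CTX_NR_MASK       ((1UL << CTX_NR_BITS) - 1)
#define CTX_VERSION_MASK  (~0UL << CTX_NR_BITS)
#define CTX_FIRST_VERSION (1UL << CTX_NR_BITS)

int main(void)
{
	unsigned long cache = 5UL | CTX_FIRST_VERSION;	/* some live state */

	/* Normal path: next number under the current version. */
	unsigned long new_ctx = ((cache + 1) & CTX_NR_MASK) |
				(cache & CTX_VERSION_MASK);

	/* Wrap path: bump the version, invalidating all old contexts. */
	unsigned long bumped = (cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;

	printf("next=%#lx after-wrap version=%#lx\n", new_ctx, bumped);
	return 0;
}
```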
1038 | #ifndef CONFIG_SMP | |
1039 | struct pgtable_cache_struct pgt_quicklists; | |
1040 | #endif | |
1041 | ||
1042 | /* OK, we have to color these pages. The page tables are accessed | |
1043 | * through a non-Dcache-enabled mapping in the VPTE area by the dtlb_backend.S | |
1044 | * code, as well as through PAGE_OFFSET-range direct-mapped addresses by | |
1045 | * other parts of the kernel. By coloring, we make sure that the tlbmiss | |
1046 | * fast handlers do not get data from old/garbage dcache lines that | |
1047 | * correspond to an old/stale virtual address (user/kernel) that | |
1048 | * previously mapped the pagetable page while accessing vpte range | |
1049 | * addresses. The idea is that if the vpte color and PAGE_OFFSET range | |
1050 | * color is the same, then when the kernel initializes the pagetable | |
1051 | * using the later address range, accesses with the first address | |
1052 | * range will see the newly initialized data rather than the garbage. | |
1053 | */ | |
1054 | #ifdef DCACHE_ALIASING_POSSIBLE | |
1055 | #define DC_ALIAS_SHIFT 1 | |
1056 | #else | |
1057 | #define DC_ALIAS_SHIFT 0 | |
1058 | #endif | |
8edf72eb | 1059 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
1da177e4 LT |
1060 | { |
1061 | struct page *page; | |
1062 | unsigned long color; | |
1063 | ||
1064 | { | |
1065 | pte_t *ptep = pte_alloc_one_fast(mm, address); | |
1066 | ||
1067 | if (ptep) | |
1068 | return ptep; | |
1069 | } | |
1070 | ||
1071 | color = VPTE_COLOR(address); | |
1072 | page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); | |
1073 | if (page) { | |
1074 | unsigned long *to_free; | |
1075 | unsigned long paddr; | |
1076 | pte_t *pte; | |
1077 | ||
1078 | #ifdef DCACHE_ALIASING_POSSIBLE | |
1079 | set_page_count(page, 1); | |
1080 | ClearPageCompound(page); | |
1081 | ||
1082 | set_page_count((page + 1), 1); | |
1083 | ClearPageCompound(page + 1); | |
1084 | #endif | |
1085 | paddr = (unsigned long) page_address(page); | |
1086 | memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT)); | |
1087 | ||
1088 | if (!color) { | |
1089 | pte = (pte_t *) paddr; | |
1090 | to_free = (unsigned long *) (paddr + PAGE_SIZE); | |
1091 | } else { | |
1092 | pte = (pte_t *) (paddr + PAGE_SIZE); | |
1093 | to_free = (unsigned long *) paddr; | |
1094 | } | |
1095 | ||
1096 | #ifdef DCACHE_ALIASING_POSSIBLE | |
1097 | /* Now free the other one up, adjust cache size. */ | |
1098 | preempt_disable(); | |
1099 | *to_free = (unsigned long) pte_quicklist[color ^ 0x1]; | |
1100 | pte_quicklist[color ^ 0x1] = to_free; | |
1101 | pgtable_cache_size++; | |
1102 | preempt_enable(); | |
1103 | #endif | |
1104 | ||
1105 | return pte; | |
1106 | } | |
1107 | return NULL; | |
1108 | } | |
1109 | ||
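pte_alloc_one_kernel() grabs an order-1 (two page) block, which on an aliasing D-cache yields one page of each color: the half whose color matches the VPTE address is returned and the other half goes onto the opposite-color quicklist. A stand-alone sketch of that choice (the VPTE_COLOR() here is a stand-in, not the real pgalloc.h definition):

```c
#include <stdio.h>

#define PAGE_SHIFT 13
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* Stand-in: color = low bit of the page frame number. */
#define VPTE_COLOR(addr) (((addr) >> PAGE_SHIFT) & 1UL)

int main(void)
{
	unsigned long pair = 0x40000000UL;	/* hypothetical order-1 block */
	unsigned long address = 0x6000UL;	/* hypothetical vpte address */
	unsigned long color = VPTE_COLOR(address);

	unsigned long pte_page = color ? pair + PAGE_SIZE : pair;
	unsigned long to_free  = color ? pair : pair + PAGE_SIZE;

	printf("color=%lu use=%#lx recycle=%#lx\n", color, pte_page, to_free);
	return 0;
}
```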
1110 | void sparc_ultra_dump_itlb(void) | |
1111 | { | |
1112 | int slot; | |
1113 | ||
1114 | if (tlb_type == spitfire) { | |
1115 | printk ("Contents of itlb: "); | |
1116 | for (slot = 0; slot < 14; slot++) printk (" "); | |
1117 | printk ("%2x:%016lx,%016lx\n", | |
1118 | 0, | |
1119 | spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0)); | |
1120 | for (slot = 1; slot < 64; slot+=3) { | |
1121 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1122 | slot, | |
1123 | spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot), | |
1124 | slot+1, | |
1125 | spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1), | |
1126 | slot+2, | |
1127 | spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2)); | |
1128 | } | |
1129 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
1130 | printk ("Contents of itlb0:\n"); | |
1131 | for (slot = 0; slot < 16; slot+=2) { | |
1132 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1133 | slot, | |
1134 | cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot), | |
1135 | slot+1, | |
1136 | cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1)); | |
1137 | } | |
1138 | printk ("Contents of itlb2:\n"); | |
1139 | for (slot = 0; slot < 128; slot+=2) { | |
1140 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1141 | slot, | |
1142 | cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot), | |
1143 | slot+1, | |
1144 | cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1)); | |
1145 | } | |
1146 | } | |
1147 | } | |
1148 | ||
1149 | void sparc_ultra_dump_dtlb(void) | |
1150 | { | |
1151 | int slot; | |
1152 | ||
1153 | if (tlb_type == spitfire) { | |
1154 | printk ("Contents of dtlb: "); | |
1155 | for (slot = 0; slot < 14; slot++) printk (" "); | |
1156 | printk ("%2x:%016lx,%016lx\n", 0, | |
1157 | spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0)); | |
1158 | for (slot = 1; slot < 64; slot+=3) { | |
1159 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1160 | slot, | |
1161 | spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot), | |
1162 | slot+1, | |
1163 | spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1), | |
1164 | slot+2, | |
1165 | spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2)); | |
1166 | } | |
1167 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
1168 | printk ("Contents of dtlb0:\n"); | |
1169 | for (slot = 0; slot < 16; slot+=2) { | |
1170 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1171 | slot, | |
1172 | cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot), | |
1173 | slot+1, | |
1174 | cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1)); | |
1175 | } | |
1176 | printk ("Contents of dtlb2:\n"); | |
1177 | for (slot = 0; slot < 512; slot+=2) { | |
1178 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1179 | slot, | |
1180 | cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2), | |
1181 | slot+1, | |
1182 | cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2)); | |
1183 | } | |
1184 | if (tlb_type == cheetah_plus) { | |
1185 | printk ("Contents of dtlb3:\n"); | |
1186 | for (slot = 0; slot < 512; slot+=2) { | |
1187 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1188 | slot, | |
1189 | cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3), | |
1190 | slot+1, | |
1191 | cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3)); | |
1192 | } | |
1193 | } | |
1194 | } | |
1195 | } | |
1196 | ||
1197 | extern unsigned long cmdline_memory_size; | |
1198 | ||
1199 | unsigned long __init bootmem_init(unsigned long *pages_avail) | |
1200 | { | |
1201 | unsigned long bootmap_size, start_pfn, end_pfn; | |
1202 | unsigned long end_of_phys_memory = 0UL; | |
1203 | unsigned long bootmap_pfn, bytes_avail, size; | |
1204 | int i; | |
1205 | ||
1206 | #ifdef CONFIG_DEBUG_BOOTMEM | |
13edad7a | 1207 | prom_printf("bootmem_init: Scan pavail, "); |
1da177e4 LT |
1208 | #endif |
1209 | ||
1210 | bytes_avail = 0UL; | |
13edad7a DM |
1211 | for (i = 0; i < pavail_ents; i++) { |
1212 | end_of_phys_memory = pavail[i].phys_addr + | |
1213 | pavail[i].reg_size; | |
1214 | bytes_avail += pavail[i].reg_size; | |
1da177e4 LT |
1215 | if (cmdline_memory_size) { |
1216 | if (bytes_avail > cmdline_memory_size) { | |
1217 | unsigned long slack = bytes_avail - cmdline_memory_size; | |
1218 | ||
1219 | bytes_avail -= slack; | |
1220 | end_of_phys_memory -= slack; | |
1221 | ||
13edad7a DM |
1222 | pavail[i].reg_size -= slack; |
1223 | if ((long)pavail[i].reg_size <= 0L) { | |
1224 | pavail[i].phys_addr = 0xdeadbeefUL; | |
1225 | pavail[i].reg_size = 0UL; | |
1226 | pavail_ents = i; | |
1da177e4 | 1227 | } else { |
13edad7a DM |
1228 | pavail[i+1].reg_size = 0UL; | |
1229 | pavail[i+1].phys_addr = 0xdeadbeefUL; | |
1230 | pavail_ents = i + 1; | |
1da177e4 LT |
1231 | } |
1232 | break; | |
1233 | } | |
1234 | } | |
1235 | } | |
1236 | ||
1237 | *pages_avail = bytes_avail >> PAGE_SHIFT; | |
1238 | ||
1239 | /* Start with page aligned address of last symbol in kernel | |
1240 | * image. The kernel is hard mapped below PAGE_OFFSET in a | |
1241 | * 4MB locked TLB translation. | |
1242 | */ | |
1243 | start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; | |
1244 | ||
1245 | bootmap_pfn = start_pfn; | |
1246 | ||
1247 | end_pfn = end_of_phys_memory >> PAGE_SHIFT; | |
1248 | ||
1249 | #ifdef CONFIG_BLK_DEV_INITRD | |
1250 | /* Now check the initial ramdisk, so that the bootmap does not overwrite it */ | |
1251 | if (sparc_ramdisk_image || sparc_ramdisk_image64) { | |
1252 | unsigned long ramdisk_image = sparc_ramdisk_image ? | |
1253 | sparc_ramdisk_image : sparc_ramdisk_image64; | |
1254 | if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE) | |
1255 | ramdisk_image -= KERNBASE; | |
1256 | initrd_start = ramdisk_image + phys_base; | |
1257 | initrd_end = initrd_start + sparc_ramdisk_size; | |
1258 | if (initrd_end > end_of_phys_memory) { | |
1259 | printk(KERN_CRIT "initrd extends beyond end of memory " | |
1260 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", | |
1261 | initrd_end, end_of_phys_memory); | |
1262 | initrd_start = 0; | |
1263 | } | |
1264 | if (initrd_start) { | |
1265 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && | |
1266 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) | |
1267 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; | |
1268 | } | |
1269 | } | |
1270 | #endif | |
1271 | /* Initialize the boot-time allocator. */ | |
1272 | max_pfn = max_low_pfn = end_pfn; | |
1273 | min_low_pfn = pfn_base; | |
1274 | ||
1275 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1276 | prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", | |
1277 | min_low_pfn, bootmap_pfn, max_low_pfn); | |
1278 | #endif | |
1279 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); | |
1280 | ||
1da177e4 LT |
1281 | /* Now register the available physical memory with the |
1282 | * allocator. | |
1283 | */ | |
13edad7a | 1284 | for (i = 0; i < pavail_ents; i++) { |
1da177e4 | 1285 | #ifdef CONFIG_DEBUG_BOOTMEM |
13edad7a DM |
1286 | prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n", |
1287 | i, pavail[i].phys_addr, pavail[i].reg_size); | |
1da177e4 | 1288 | #endif |
13edad7a | 1289 | free_bootmem(pavail[i].phys_addr, pavail[i].reg_size); |
1da177e4 LT |
1290 | } |
1291 | ||
1292 | #ifdef CONFIG_BLK_DEV_INITRD | |
1293 | if (initrd_start) { | |
1294 | size = initrd_end - initrd_start; | |
1295 | ||
1296 | /* Reserve the initrd image area. */ | |
1297 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1298 | prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n", | |
1299 | initrd_start, initrd_end); | |
1300 | #endif | |
1301 | reserve_bootmem(initrd_start, size); | |
1302 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | |
1303 | ||
1304 | initrd_start += PAGE_OFFSET; | |
1305 | initrd_end += PAGE_OFFSET; | |
1306 | } | |
1307 | #endif | |
1308 | /* Reserve the kernel text/data/bss. */ | |
1309 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1310 | prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size); | |
1311 | #endif | |
1312 | reserve_bootmem(kern_base, kern_size); | |
1313 | *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT; | |
1314 | ||
1315 | /* Reserve the bootmem map. We do not account for it | |
1316 | * in pages_avail because we will release that memory | |
1317 | * in free_all_bootmem. | |
1318 | */ | |
1319 | size = bootmap_size; | |
1320 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1321 | prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n", | |
1322 | (bootmap_pfn << PAGE_SHIFT), size); | |
1323 | #endif | |
1324 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); | |
1325 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | |
1326 | ||
1327 | return end_pfn; | |
1328 | } | |
1329 | ||
56425306 DM |
1330 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1331 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | |
1332 | { | |
1333 | unsigned long vstart = PAGE_OFFSET + pstart; | |
1334 | unsigned long vend = PAGE_OFFSET + pend; | |
1335 | unsigned long alloc_bytes = 0UL; | |
1336 | ||
1337 | if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { | |
13edad7a | 1338 | prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", |
56425306 DM |
1339 | vstart, vend); |
1340 | prom_halt(); | |
1341 | } | |
1342 | ||
1343 | while (vstart < vend) { | |
1344 | unsigned long this_end, paddr = __pa(vstart); | |
1345 | pgd_t *pgd = pgd_offset_k(vstart); | |
1346 | pud_t *pud; | |
1347 | pmd_t *pmd; | |
1348 | pte_t *pte; | |
1349 | ||
1350 | pud = pud_offset(pgd, vstart); | |
1351 | if (pud_none(*pud)) { | |
1352 | pmd_t *new; | |
1353 | ||
1354 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | |
1355 | alloc_bytes += PAGE_SIZE; | |
1356 | pud_populate(&init_mm, pud, new); | |
1357 | } | |
1358 | ||
1359 | pmd = pmd_offset(pud, vstart); | |
1360 | if (!pmd_present(*pmd)) { | |
1361 | pte_t *new; | |
1362 | ||
1363 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | |
1364 | alloc_bytes += PAGE_SIZE; | |
1365 | pmd_populate_kernel(&init_mm, pmd, new); | |
1366 | } | |
1367 | ||
1368 | pte = pte_offset_kernel(pmd, vstart); | |
1369 | this_end = (vstart + PMD_SIZE) & PMD_MASK; | |
1370 | if (this_end > vend) | |
1371 | this_end = vend; | |
1372 | ||
1373 | while (vstart < this_end) { | |
1374 | pte_val(*pte) = (paddr | pgprot_val(prot)); | |
1375 | ||
1376 | vstart += PAGE_SIZE; | |
1377 | paddr += PAGE_SIZE; | |
1378 | pte++; | |
1379 | } | |
1380 | } | |
1381 | ||
1382 | return alloc_bytes; | |
1383 | } | |
1384 | ||
13edad7a DM |
1385 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; |
1386 | static int pall_ents __initdata; | |
1387 | ||
56425306 DM |
1388 | extern unsigned int kvmap_linear_patch[1]; |
1389 | ||
1390 | static void __init kernel_physical_mapping_init(void) | |
1391 | { | |
13edad7a | 1392 | unsigned long i, mem_alloced = 0UL; |
56425306 | 1393 | |
13edad7a DM |
1394 | read_obp_memory("reg", &pall[0], &pall_ents); |
1395 | ||
1396 | for (i = 0; i < pall_ents; i++) { | |
56425306 DM |
1397 | unsigned long phys_start, phys_end; |
1398 | ||
13edad7a DM |
1399 | phys_start = pall[i].phys_addr; |
1400 | phys_end = phys_start + pall[i].reg_size; | |
56425306 DM |
1401 | mem_alloced += kernel_map_range(phys_start, phys_end, |
1402 | PAGE_KERNEL); | |
56425306 DM |
1403 | } |
1404 | ||
1405 | printk("Allocated %ld bytes for kernel page tables.\n", | |
1406 | mem_alloced); | |
1407 | ||
1408 | kvmap_linear_patch[0] = 0x01000000; /* nop */ | |
1409 | flushi(&kvmap_linear_patch[0]); | |
1410 | ||
1411 | __flush_tlb_all(); | |
1412 | } | |
1413 | ||
1414 | void kernel_map_pages(struct page *page, int numpages, int enable) | |
1415 | { | |
1416 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | |
1417 | unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); | |
1418 | ||
1419 | kernel_map_range(phys_start, phys_end, | |
1420 | (enable ? PAGE_KERNEL : __pgprot(0))); | |
1421 | ||
1422 | /* We should perform an IPI and flush all tlbs, | |
1423 | * but that can deadlock, so flush only the current cpu. | |
1424 | */ | |
1425 | __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, | |
1426 | PAGE_OFFSET + phys_end); | |
1427 | } | |
1428 | #endif | |
1429 | ||
10147570 DM |
1430 | unsigned long __init find_ecache_flush_span(unsigned long size) |
1431 | { | |
0836a0eb DM |
1432 | int i; |
1433 | ||
13edad7a DM |
1434 | for (i = 0; i < pavail_ents; i++) { |
1435 | if (pavail[i].reg_size >= size) | |
1436 | return pavail[i].phys_addr; | |
0836a0eb DM |
1437 | } |
1438 | ||
13edad7a | 1439 | return ~0UL; |
0836a0eb DM |
1440 | } |
1441 | ||
1da177e4 LT |
1442 | /* paging_init() sets up the page tables */ |
1443 | ||
1444 | extern void cheetah_ecache_flush_init(void); | |
1445 | ||
1446 | static unsigned long last_valid_pfn; | |
56425306 | 1447 | pgd_t swapper_pg_dir[2048]; |
1da177e4 LT |
1448 | |
1449 | void __init paging_init(void) | |
1450 | { | |
2bdb3cb2 | 1451 | unsigned long end_pfn, pages_avail, shift; |
0836a0eb DM |
1452 | unsigned long real_end, i; |
1453 | ||
13edad7a DM |
1454 | /* Find available physical memory... */ |
1455 | read_obp_memory("available", &pavail[0], &pavail_ents); | |
0836a0eb DM |
1456 | |
1457 | phys_base = 0xffffffffffffffffUL; | |
13edad7a DM |
1458 | for (i = 0; i < pavail_ents; i++) |
1459 | phys_base = min(phys_base, pavail[i].phys_addr); | |
0836a0eb | 1460 | |
0836a0eb DM |
1461 | pfn_base = phys_base >> PAGE_SHIFT; |
1462 | ||
1463 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | |
1464 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | |
1da177e4 LT |
1465 | |
1466 | set_bit(0, mmu_context_bmap); | |
1467 | ||
2bdb3cb2 DM |
1468 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); |
1469 | ||
1da177e4 LT |
1470 | real_end = (unsigned long)_end; |
1471 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) | |
1472 | bigkernel = 1; | |
2bdb3cb2 DM |
1473 | if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { |
1474 | prom_printf("paging_init: Kernel > 8MB, too large.\n"); | |
1475 | prom_halt(); | |
1da177e4 | 1476 | } |
2bdb3cb2 DM |
1477 | |
1478 | /* Set kernel pgd to upper alias so physical page computations | |
1da177e4 LT |
1479 | * work. |
1480 | */ | |
1481 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | |
1482 | ||
56425306 | 1483 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); |
1da177e4 LT |
1484 | |
1485 | /* Now can init the kernel/bad page tables. */ | |
1486 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | |
56425306 | 1487 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
1da177e4 | 1488 | |
2bdb3cb2 | 1489 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); |
1da177e4 | 1490 | |
c9c10830 | 1491 | inherit_prom_mappings(); |
5085b4a5 | 1492 | |
1da177e4 LT |
1493 | /* Ok, we can use our TLB miss and window trap handlers safely. |
1494 | * We need to do a quick peek here to see if we are on StarFire | |
1495 | * or not, so setup_tba can setup the IRQ globals correctly (it | |
1496 | * needs to get the hard smp processor id correctly). | |
1497 | */ | |
1498 | { | |
1499 | extern void setup_tba(int); | |
1500 | setup_tba(this_is_starfire); | |
1501 | } | |
1da177e4 | 1502 | |
c9c10830 DM |
1503 | inherit_locked_prom_mappings(1); |
1504 | ||
1505 | __flush_tlb_all(); | |
9ad98c5b | 1506 | |
2bdb3cb2 DM |
1507 | /* Setup bootmem... */ |
1508 | pages_avail = 0; | |
1509 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | |
1510 | ||
56425306 DM |
1511 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1512 | kernel_physical_mapping_init(); | |
1513 | #endif | |
1514 | ||
1da177e4 LT |
1515 | { |
1516 | unsigned long zones_size[MAX_NR_ZONES]; | |
1517 | unsigned long zholes_size[MAX_NR_ZONES]; | |
1518 | unsigned long npages; | |
1519 | int znum; | |
1520 | ||
1521 | for (znum = 0; znum < MAX_NR_ZONES; znum++) | |
1522 | zones_size[znum] = zholes_size[znum] = 0; | |
1523 | ||
1524 | npages = end_pfn - pfn_base; | |
1525 | zones_size[ZONE_DMA] = npages; | |
1526 | zholes_size[ZONE_DMA] = npages - pages_avail; | |
1527 | ||
1528 | free_area_init_node(0, &contig_page_data, zones_size, | |
1529 | phys_base >> PAGE_SHIFT, zholes_size); | |
1530 | } | |
1531 | ||
1532 | device_scan(); | |
1533 | } | |
1534 | ||
1da177e4 LT |
1535 | static void __init taint_real_pages(void) |
1536 | { | |
1da177e4 LT |
1537 | int i; |
1538 | ||
13edad7a | 1539 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); |
1da177e4 | 1540 | |
13edad7a | 1541 | /* Find changes discovered in the physmem available rescan and |
1da177e4 LT |
1542 | * reserve the lost portions in the bootmem maps. |
1543 | */ | |
13edad7a | 1544 | for (i = 0; i < pavail_ents; i++) { |
1da177e4 LT |
1545 | unsigned long old_start, old_end; |
1546 | ||
13edad7a | 1547 | old_start = pavail[i].phys_addr; |
1da177e4 | 1548 | old_end = old_start + |
13edad7a | 1549 | pavail[i].reg_size; |
1da177e4 LT |
1550 | while (old_start < old_end) { |
1551 | int n; | |
1552 | ||
13edad7a | 1553 | for (n = 0; n < pavail_rescan_ents; n++) { |
1da177e4 LT |
1554 | unsigned long new_start, new_end; |
1555 | ||
13edad7a DM |
1556 | new_start = pavail_rescan[n].phys_addr; |
1557 | new_end = new_start + | |
1558 | pavail_rescan[n].reg_size; | |
1da177e4 LT |
1559 | |
1560 | if (new_start <= old_start && | |
1561 | new_end >= (old_start + PAGE_SIZE)) { | |
13edad7a DM |
1562 | set_bit(old_start >> 22, |
1563 | sparc64_valid_addr_bitmap); | |
1da177e4 LT |
1564 | goto do_next_page; |
1565 | } | |
1566 | } | |
1567 | reserve_bootmem(old_start, PAGE_SIZE); | |
1568 | ||
1569 | do_next_page: | |
1570 | old_start += PAGE_SIZE; | |
1571 | } | |
1572 | } | |
1573 | } | |
1574 | ||
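taint_real_pages() walks the old banks page by page, marks a page's 4MB chunk valid in sparc64_valid_addr_bitmap only when some rescanned bank still fully contains the page, and hands everything else back to reserve_bootmem(). The containment test reduces to a single predicate (stand-alone sketch, 8K page assumed):

```c
#include <stdio.h>

#define PAGE_SIZE 8192UL	/* sparc64 base page, for illustration */

/* True when [start, start + PAGE_SIZE) lies inside [new_start, new_end). */
static int page_still_available(unsigned long start,
				unsigned long new_start,
				unsigned long new_end)
{
	return new_start <= start && new_end >= start + PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", page_still_available(0x4000, 0x0, 0x10000));	/* 1 */
	printf("%d\n", page_still_available(0xf000, 0x0, 0x10000));	/* 0 */
	return 0;
}
```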
1575 | void __init mem_init(void) | |
1576 | { | |
1577 | unsigned long codepages, datapages, initpages; | |
1578 | unsigned long addr, last; | |
1579 | int i; | |
1580 | ||
1581 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | |
1582 | i += 1; | |
2bdb3cb2 | 1583 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); |
1da177e4 LT |
1584 | if (sparc64_valid_addr_bitmap == NULL) { |
1585 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | |
1586 | prom_halt(); | |
1587 | } | |
1588 | memset(sparc64_valid_addr_bitmap, 0, i << 3); | |
1589 | ||
1590 | addr = PAGE_OFFSET + kern_base; | |
1591 | last = PAGE_ALIGN(kern_size) + addr; | |
1592 | while (addr < last) { | |
1593 | set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); | |
1594 | addr += PAGE_SIZE; | |
1595 | } | |
1596 | ||
1597 | taint_real_pages(); | |
1598 | ||
1599 | max_mapnr = last_valid_pfn - pfn_base; | |
1600 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | |
1601 | ||
1602 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1603 | prom_printf("mem_init: Calling free_all_bootmem().\n"); | |
1604 | #endif | |
1605 | totalram_pages = num_physpages = free_all_bootmem() - 1; | |
1606 | ||
1607 | /* | |
1608 | * Set up the zero page, mark it reserved, so that page count | |
1609 | * is not manipulated when freeing the page from user ptes. | |
1610 | */ | |
1611 | mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); | |
1612 | if (mem_map_zero == NULL) { | |
1613 | prom_printf("paging_init: Cannot alloc zero page.\n"); | |
1614 | prom_halt(); | |
1615 | } | |
1616 | SetPageReserved(mem_map_zero); | |
1617 | ||
1618 | codepages = (((unsigned long) _etext) - ((unsigned long) _start)); | |
1619 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | |
1620 | datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); | |
1621 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | |
1622 | initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); | |
1623 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | |
1624 | ||
1625 | printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", | |
1626 | nr_free_pages() << (PAGE_SHIFT-10), | |
1627 | codepages << (PAGE_SHIFT-10), | |
1628 | datapages << (PAGE_SHIFT-10), | |
1629 | initpages << (PAGE_SHIFT-10), | |
1630 | PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); | |
1631 | ||
1632 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
1633 | cheetah_ecache_flush_init(); | |
1634 | } | |
1635 | ||
898cf0ec | 1636 | void free_initmem(void) |
1da177e4 LT |
1637 | { |
1638 | unsigned long addr, initend; | |
1639 | ||
1640 | /* | |
1641 | * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. | |
1642 | */ | |
1643 | addr = PAGE_ALIGN((unsigned long)(__init_begin)); | |
1644 | initend = (unsigned long)(__init_end) & PAGE_MASK; | |
1645 | for (; addr < initend; addr += PAGE_SIZE) { | |
1646 | unsigned long page; | |
1647 | struct page *p; | |
1648 | ||
1649 | page = (addr + | |
1650 | ((unsigned long) __va(kern_base)) - | |
1651 | ((unsigned long) KERNBASE)); | |
1652 | memset((void *)addr, 0xcc, PAGE_SIZE); | |
1653 | p = virt_to_page(page); | |
1654 | ||
1655 | ClearPageReserved(p); | |
1656 | set_page_count(p, 1); | |
1657 | __free_page(p); | |
1658 | num_physpages++; | |
1659 | totalram_pages++; | |
1660 | } | |
1661 | } | |
1662 | ||
1663 | #ifdef CONFIG_BLK_DEV_INITRD | |
1664 | void free_initrd_mem(unsigned long start, unsigned long end) | |
1665 | { | |
1666 | if (start < end) | |
1667 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | |
1668 | for (; start < end; start += PAGE_SIZE) { | |
1669 | struct page *p = virt_to_page(start); | |
1670 | ||
1671 | ClearPageReserved(p); | |
1672 | set_page_count(p, 1); | |
1673 | __free_page(p); | |
1674 | num_physpages++; | |
1675 | totalram_pages++; | |
1676 | } | |
1677 | } | |
1678 | #endif |