x86: clean up pte_exec
arch/x86/mm/init_32.c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry.  This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		if (pmd_table != pmd_offset(pud, 0))
			BUG();
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table)
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
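
/*
 * Illustrative sketch: together, the two helpers above are enough to
 * materialize the paging hierarchy down to the pte level for a kernel
 * virtual address.  A caller would do, roughly:
 *
 *	pgd_t *pgd = pgd_base + pgd_index(vaddr);
 *	pmd_t *pmd = one_md_table_init(pgd) + pmd_index(vaddr);
 *	pte_t *pte = one_page_table_init(pmd) + pte_index(vaddr);
 *
 * Without PAE the pud and pmd levels are folded into the pgd, so
 * one_md_table_init() allocates nothing and just returns the folded
 * entry; with PAE it really allocates a bootmem page for the pmd.
 */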

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
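
/*
 * Example usage (see pagetable_init() below), pre-allocating page
 * tables for the fixmap region:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 *	page_table_range_init(vaddr, end, pgd_base);
 *
 * The walk advances vaddr in PMD_SIZE steps (4MB, or 2MB with PAE), so
 * the 'vaddr != end' test only terminates if both bounds are
 * PMD-aligned; the callers in this file align their arguments
 * accordingly.
 */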

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise
			   create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) ||
				    is_kernel_text(address2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0;
				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = PAGE_KERNEL;

					if (is_kernel_text(address))
						prot = PAGE_KERNEL_EXEC;

					set_pte(pte, pfn_pte(pfn, prot));
				}
			}
		}
	}
}
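
/*
 * Worked example for the PSE path above: one large-page pmd entry maps
 * PTRS_PER_PTE 4KB frames at once (1024 frames = 4MB, or 512 = 2MB with
 * PAE), which is why pfn advances by PTRS_PER_PTE per entry.  'address2'
 * is the last byte the large page will cover; the pmd entry is marked
 * executable if either end of that span falls inside the kernel image.
 */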

static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
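
/*
 * Note on the rounding above: 'addr' rounds the start of an e820 range
 * up to a whole page while 'end' truncates downward, so a page counts
 * as RAM only if it lies entirely inside the range.  For example, a
 * range covering 0x1800-0x57ff gives addr = 2 and end = 5: the partial
 * pages at both ends are excluded.
 */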

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
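
/*
 * The macro above open-codes a full page-table walk; since i386 folds
 * the pud (and, without PAE, the pmd) into the pgd, most of the
 * *_offset() steps compile away.  It is roughly equivalent to:
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	pud_t *pud = pud_offset(pgd, vaddr);
 *	pmd_t *pmd = pmd_offset(pud, vaddr);
 *	pte_t *pte = pte_offset_kernel(pmd, vaddr);
 */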

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * We assume a single node, and that any memory added dynamically
 * and onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have a mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/* Make sure kernel address space is empty so that a pagetable
	   will be allocated for it. */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE).  The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd_base = swapper_pg_dir;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}
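
/*
 * Aside on the PAE case above: '1 + __pa(empty_zero_page)' is just
 * _PAGE_PRESENT | __pa(empty_zero_page) -- the low pgd entries are kept
 * present but redirected to the zero page, while the non-PAE case can
 * simply write a not-present (zero) entry.
 */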

int nx_enabled = 0;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata = 0;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) >= 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
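
/*
 * Detection details: extended leaf 0x80000001 EDX bit 20 is the CPUID
 * "NX" feature flag, and EFER_NX is the no-execute-enable bit in the
 * EFER MSR.  Setting it turns bit 63 (_PAGE_NX) of PAE page table
 * entries into an execute-disable bit instead of a reserved bit.
 */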

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	pte_update_defer(&init_mm, vaddr, pte);
	__flush_tlb_all();
out:
	return ret;
}
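
/*
 * Bit arithmetic above: under PAE a pte is 64 bits wide, split into
 * 32-bit pte_low/pte_high halves, and _PAGE_BIT_NX is bit 63.  Hence
 * (_PAGE_BIT_NX - 32) == 31, i.e. the NX bit is the top bit of
 * pte_high; clearing it makes the page executable again.
 */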

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle: SMP does its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work.  We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

#endif

struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1)
		pmd_cache = kmem_cache_create("pmd",
					      PTRS_PER_PMD*sizeof(pmd_t),
					      PTRS_PER_PMD*sizeof(pmd_t),
					      SLAB_PANIC,
					      pmd_ctor);
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
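/*
 * How the test works: 'flag' starts out as 1.  The write at label 1
 * targets the read-only FIX_WP_TEST fixmap page; if the CPU honours WP
 * in supervisor mode the write faults, and the exception-table entry
 * resumes execution at label 2, skipping the 'xorl' that would clear
 * 'flag'.  A return value of 1 therefore means the WP bit works.
 */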
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		change_page_attr(virt_to_page(start),
				 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
		printk("Write protecting the kernel text: %luk\n", size >> 10);

#ifdef CONFIG_CPA_DEBUG
		global_flush_tlb();

		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
		change_page_attr(virt_to_page(start), size>>PAGE_SHIFT,
				 PAGE_KERNEL_EXEC);
		global_flush_tlb();

		printk("Testing CPA: write protecting again\n");
		change_page_attr(virt_to_page(start), size>>PAGE_SHIFT,
				 PAGE_KERNEL_RX);
		global_flush_tlb();
#endif
	}
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	change_page_attr(virt_to_page(start),
			 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
	printk("Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk at least gets out to give a better debug hint
	 * of who the culprit is.
	 */
	global_flush_tlb();

#ifdef CONFIG_CPA_DEBUG
	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
	change_page_attr(virt_to_page(start), size >> PAGE_SHIFT,
			 PAGE_KERNEL);
	global_flush_tlb();

	printk("Testing CPA: write protecting again\n");
	change_page_attr(virt_to_page(start), size >> PAGE_SHIFT,
			 PAGE_KERNEL_RO);
	global_flush_tlb();
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif