include cleanup: update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
Source: arch/blackfin/mm/init.c (GitHub mirror: mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 * Copyright 2004-2009 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later.
5 */
6
7 #include <linux/gfp.h>
8 #include <linux/swap.h>
9 #include <linux/bootmem.h>
10 #include <linux/uaccess.h>
11 #include <asm/bfin-global.h>
12 #include <asm/pda.h>
13 #include <asm/cplbinit.h>
14 #include <asm/early_printk.h>
15 #include "blackfin_sram.h"
16
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

static unsigned long empty_bad_page;

static unsigned long empty_zero_page;

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
/*
 * Per-CPU exception stacks (1024 longs each), used when no L1 scratchpad
 * is reserved for exceptions; optionally placed in L1 data SRAM.
 */
#if defined CONFIG_SYSCALL_TAB_L1
__attribute__((l1_data))
#endif
static unsigned long exception_stack[NR_CPUS][1024];
#endif

/* Per-CPU private data areas; exported for use elsewhere in the kernel. */
struct blackfin_pda cpu_pda[NR_CPUS];
EXPORT_SYMBOL(cpu_pda);
45
46 /*
47 * paging_init() continues the virtual memory environment setup which
48 * was begun by the code in arch/head.S.
49 * The parameters are pointers to where to stick the starting and ending
50 * addresses of available kernel virtual memory.
51 */
52 void __init paging_init(void)
53 {
54 /*
55 * make sure start_mem is page aligned, otherwise bootmem and
56 * page_alloc get different views og the world
57 */
58 unsigned long end_mem = memory_end & PAGE_MASK;
59
60 pr_debug("start_mem is %#lx virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem);
61
62 /*
63 * initialize the bad page table and bad page to point
64 * to a couple of allocated pages
65 */
66 empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
67 empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
68 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
69 memset((void *)empty_zero_page, 0, PAGE_SIZE);
70
71 /*
72 * Set up SFC/DFC registers (user data space)
73 */
74 set_fs(KERNEL_DS);
75
76 pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
77 PAGE_ALIGN(memory_start), end_mem);
78
79 {
80 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
81
82 zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
83 zones_size[ZONE_NORMAL] = 0;
84 #ifdef CONFIG_HIGHMEM
85 zones_size[ZONE_HIGHMEM] = 0;
86 #endif
87 free_area_init(zones_size);
88 }
89 }
90
/*
 * init_pda - set up the current CPU's private data area (PDA).
 *
 * Called very early from assembly (asmlinkage). The memory the PDA
 * fields point at is still undefined at this time; only the pointers
 * themselves are made valid here.
 */
asmlinkage void __init init_pda(void)
{
	unsigned int cpu = raw_smp_processor_id();

	early_shadow_stamp();

	/* Initialize the PDA fields holding references to other parts
	   of the memory. The content of such memory is still
	   undefined at the time of the call, we are only setting up
	   valid pointers to it. */
	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));

	/* Link the two PDAs into a ring so each CPU can reach the other's.
	   NOTE(review): indexes cpu_pda[1] unconditionally — assumes
	   NR_CPUS >= 2 when CONFIG_SMP; confirm for UP configurations. */
	cpu_pda[0].next = &cpu_pda[1];
	cpu_pda[1].next = &cpu_pda[0];

#ifdef CONFIG_EXCEPTION_L1_SCRATCH
	/* Exception stack sits at the top of the L1 scratchpad region. */
	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
					L1_SCRATCH_LENGTH);
#else
	/* Stacks grow downward, so the top of this CPU's stack is the start
	   of the NEXT array row — one-past-the-end of the array for the last
	   CPU, which is a valid address to form (never dereferenced as row). */
	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
#endif

#ifdef CONFIG_SMP
	/* Initial interrupt mask for this CPU (SMP only). */
	cpu_pda[cpu].imask = 0x1f;
#endif
}
117
118 void __init mem_init(void)
119 {
120 unsigned int codek = 0, datak = 0, initk = 0;
121 unsigned int reservedpages = 0, freepages = 0;
122 unsigned long tmp;
123 unsigned long start_mem = memory_start;
124 unsigned long end_mem = memory_end;
125
126 end_mem &= PAGE_MASK;
127 high_memory = (void *)end_mem;
128
129 start_mem = PAGE_ALIGN(start_mem);
130 max_mapnr = num_physpages = MAP_NR(high_memory);
131 printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);
132
133 /* This will put all memory onto the freelists. */
134 totalram_pages = free_all_bootmem();
135
136 reservedpages = 0;
137 for (tmp = 0; tmp < max_mapnr; tmp++)
138 if (PageReserved(pfn_to_page(tmp)))
139 reservedpages++;
140 freepages = max_mapnr - reservedpages;
141
142 /* do not count in kernel image between _rambase and _ramstart */
143 reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
144 #if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
145 reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
146 #endif
147
148 codek = (_etext - _stext) >> 10;
149 initk = (__init_end - __init_begin) >> 10;
150 datak = ((_ramstart - _rambase) >> 10) - codek - initk;
151
152 printk(KERN_INFO
153 "Memory available: %luk/%luk RAM, "
154 "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n",
155 (unsigned long) freepages << (PAGE_SHIFT-10), _ramend >> 10,
156 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
157 }
158
159 static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
160 {
161 unsigned long addr;
162 /* next to check that the page we free is not a partial page */
163 for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
164 ClearPageReserved(virt_to_page(addr));
165 init_page_count(virt_to_page(addr));
166 free_page(addr);
167 totalram_pages++;
168 }
169 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
170 }
171
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * free_initrd_mem - free the pages that held the initial ramdisk.
 *
 * With CONFIG_MPU the region is deliberately left in place —
 * presumably the MPU protection layout depends on it; TODO confirm.
 */
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
	free_init_pages("initrd memory", start, end);
#endif
}
#endif
180
/*
 * free_initmem - free the kernel's .init sections after boot.
 *
 * Only done for a RAM-resident kernel without an MPU; __init_refok
 * suppresses the section-mismatch warning for touching __init symbols
 * from non-init code.
 */
void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
	free_init_pages("unused kernel memory",
		(unsigned long)(&__init_begin),
		(unsigned long)(&__init_end));
#endif
}