include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
arch/microblaze/mm/init.c
/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lmb.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>

/* Used for both MMU and noMMU because of the PCI generic code */
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);

#else
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

static int init_bootmem_done;
#endif /* CONFIG_MMU */

char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];

	/* Clear every zone */
	memset(zones_size, 0, sizeof(zones_size));

	/*
	 * Old behaviour: we could DMA to/from any address, so every page
	 * went into ZONE_DMA. Now we use only ZONE_NORMAL.
	 */
	zones_size[ZONE_NORMAL] = max_mapnr;

	free_area_init(zones_size);
}

void __init setup_memory(void)
{
	int i;
	unsigned long map_size;
#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find the main memory bank that holds the kernel */
	for (i = 0; i < lmb.memory.cnt; i++) {
		memory_start = (u32) lmb.memory.region[i].base;
		memory_end = (u32) lmb.memory.region[i].base
				+ (u32) lmb.memory.region[i].size;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= memory_end)) {
			memory_size = memory_end - memory_start;
			PAGE_OFFSET = memory_start;
			printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
				"size 0x%08x\n", __func__, (u32) memory_start,
				(u32) memory_end, (u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_end) {
		panic("%s: Missing memory setting 0x%08x-0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_end);
	}

	/* Reserve the region occupied by the kernel */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* ALIGN could be dropped because _end is already aligned in vmlinux.lds.S */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	lmb_reserve(kernel_align_start, kernel_align_size);
	printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
		+ kernel_align_size, kernel_align_size);
#endif
	/*
	 * Kernel:
	 * start: page-aligned base physical address of the kernel
	 * end: page-aligned end physical address of the kernel
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 * num_physpages - number of all pages
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
	max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;

	printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
#ifndef CONFIG_MMU
	map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)),
					min_low_pfn, max_low_pfn);
#else
	map_size = init_bootmem_node(&contig_page_data,
		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
#endif
	lmb_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);

	/* The free bootmem region is the whole of main memory */
	free_bootmem(memory_start, memory_size);

	/* Reserve the already-allocated (LMB-reserved) blocks */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
			(u32) lmb.reserved.region[i].base,
			(u32) lmb_size_bytes(&lmb.reserved, i));
		reserve_bootmem(lmb.reserved.region[i].base,
			lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
	}
#ifdef CONFIG_MMU
	init_bootmem_done = 1;
#endif
	paging_init();
}

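/*
 * Hand a range of init pages back to the page allocator: clear the
 * reserved bit, reset the page count, poison the contents with 0xcc,
 * free each page and account for it in totalram_pages.
 */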
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
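/*
 * Release the pages holding the initial RAM disk once it is no longer
 * needed, returning them to the page allocator and reporting how much
 * memory was freed.
 */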
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
					(int)(pages * (PAGE_SIZE / 1024)));
}
#endif

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

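/*
 * mem_init: set high_memory, hand all bootmem pages over to the page
 * allocator via free_all_bootmem(), report the available memory and set
 * mem_init_done so later allocations can go through the slab allocator.
 */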
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_end);
	/* this will put all memory onto the freelists */
	totalram_pages += free_all_bootmem();

	printk(KERN_INFO "Memory: %luk/%luk available\n",
			nr_free_pages() << (PAGE_SHIFT-10),
			num_physpages << (PAGE_SHIFT-10));
	mem_init_done = 1;
}

#ifndef CONFIG_MMU
/* Check against bounds of physical memory */
int ___range_ok(unsigned long addr, unsigned long size)
{
	return ((addr < memory_start) ||
		((addr + size) > memory_end));
}
EXPORT_SYMBOL(___range_ok);

int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what MMU_init will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memory_end = memory_start + memory_size;
			lmb.memory.region[0].size = memory_size;
		}
	}
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!lmb.reserved.cnt) {
		printk(KERN_EMERG "Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) lmb.memory.region[0].size < 0x1000000) {
		printk(KERN_EMERG "Memory must be greater than 16MB\n");
		machine_restart(NULL);
	}
	/* Find main memory where the kernel is */
	memory_start = (u32) lmb.memory.region[0].base;
	memory_end = (u32) lmb.memory.region[0].base +
			(u32) lmb.memory.region[0].size;
	memory_size = memory_end - memory_start;

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	lmb_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
/*	if (initrd_start) {
		mem_pieces_remove(&phys_avail, __pa(initrd_start),
			initrd_end - initrd_start, 1);
	}*/
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

#ifdef HIGHMEM_START_BOOL
	ioremap_base = HIGHMEM_START;
#else
	ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
	ioremap_bot = ioremap_base;

	/* Initialize the context management stuff */
	mmu_context_init();
}

/*
 * This is only called until mem_init is done; it returns one page of
 * early memory.
 */
void __init *early_get_page(void)
{
	void *p;
	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		/*
		 * The limit is memory_start + 32MB because of the memory
		 * mapping set up by head.S.
		 */
		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
					memory_start + 0x2000000));
	}
	return p;
}

#endif /* CONFIG_MMU */

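/*
 * Allocate memory regardless of boot phase: use kmalloc() once
 * mem_init() has run, otherwise fall back to the bootmem allocator.
 */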
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	if (mem_init_done)
		return kmalloc(size, mask);
	else
		return alloc_bootmem(size);
}

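/*
 * Zeroing counterpart of alloc_maybe_bootmem(): kzalloc() after
 * mem_init(), otherwise a bootmem allocation that is memset to zero.
 * Illustrative use: p = zalloc_maybe_bootmem(sizeof(*p), GFP_KERNEL);
 */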
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = alloc_bootmem(size);
		if (p)
			memset(p, 0, size);
	}
	return p;
}