/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

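/* Parse the "initrd=<start>,<size>" kernel command-line parameter. */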
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem(), which skips
 * holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

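/*
 * Walk every bank of every online node and report page-level usage
 * counts: free, reserved, slab, swap-cached, and shared pages.
 */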
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_node(node) {
		for_each_nodebank (i,mi,node) {
			struct membank *bank = &mi->bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = pfn_to_page(pfn1);
			end = pfn_to_page(pfn2 - 1) + 1;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

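/*
 * Find the lowest and highest page frame numbers spanned by this
 * node's banks, tracking the lowmem and highmem upper bounds
 * separately.
 */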
static void __init find_node_limits(int node, struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
	unsigned int start_pfn, i, bootmap_pfn;

	start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
	bootmap_pfn = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned int start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (end < start_pfn)
			continue;

		if (start < start_pfn)
			start = start_pfn;

		if (end <= start)
			continue;

		if (end - start >= bootmap_pages) {
			bootmap_pfn = start;
			break;
		}
	}

	if (bootmap_pfn == 0)
		BUG();

	return bootmap_pfn;
}

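/*
 * Decide which node, if any, holds the initrd image.  Returns the
 * node number when the initrd lies entirely within one memory bank,
 * -1 (after disabling the initrd) when it does not, and -2 when no
 * initrd was configured at all.
 */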
static int __init check_initrd(struct meminfo *mi)
{
	int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long end = phys_initrd_start + phys_initrd_size;

	/*
	 * Make sure that the initrd is within a valid area of
	 * memory.
	 */
	if (phys_initrd_size) {
		unsigned int i;

		initrd_node = -1;

		for (i = 0; i < mi->nr_banks; i++) {
			struct membank *bank = &mi->bank[i];
			if (bank_phys_start(bank) <= phys_initrd_start &&
			    end <= bank_phys_end(bank))
				initrd_node = bank->node;
		}
	}

	if (initrd_node == -1) {
		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
		       "physical memory - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
#endif

	return initrd_node;
}

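/*
 * Bring up the bootmem allocator for one node: place the bootmap
 * bitmap, hand the node's lowmem banks over as free memory, then
 * reserve the bitmap itself.
 */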
static void __init bootmem_init_node(int node, struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long boot_pfn;
	unsigned int boot_pages;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

	/*
	 * Initialise the bootmem allocator for this node, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(node);
	pgdat = NODE_DATA(node);
	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the bootmem bitmap for this node.
	 */
	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
}

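/*
 * Reserve the initrd's physical pages with bootmem and publish the
 * initrd_start/initrd_end virtual window; if the region is already
 * in use, the initrd is disabled instead.
 */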
static void __init bootmem_reserve_initrd(int node)
{
#ifdef CONFIG_BLK_DEV_INITRD
	pg_data_t *pgdat = NODE_DATA(node);
	int res;

	res = reserve_bootmem_node(pgdat, phys_initrd_start,
			     phys_initrd_size, BOOTMEM_EXCLUSIVE);

	if (res == 0) {
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	} else {
		printk(KERN_ERR
			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
			"memory region - disabling initrd\n",
			phys_initrd_start, phys_initrd_size);
	}
#endif
}

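/*
 * Compute the zone sizes and hole sizes for this node and hand them
 * to free_area_init_node(), which builds the node's zone structures
 * and mem_map.
 */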
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long min, max_low, max_high;
	int i;

	find_node_limits(node, mi, &min, &max_low, &max_high);

	/*
	 * initialise the zones within this node.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The size of this node has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory to the
	 * zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * For each bank in this node, calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes_in_node)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_nodebank(i, mi, node) {
		int idx = 0;
#ifdef CONFIG_HIGHMEM
		if (mi->bank[i].highmem)
			idx = ZONE_HIGHMEM;
#endif
		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(node, zone_size, zhole_size);

	free_area_init_node(node, zone_size, min, zhole_size);
}

#ifndef CONFIG_SPARSEMEM
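/*
 * Without sparsemem, a PFN is valid iff it falls inside one of the
 * registered banks.  Binary-search the bank array, which this search
 * assumes is sorted by address.
 */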
int pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int left = 0, right = mi->nr_banks;

	do {
		unsigned int mid = (right + left) / 2;
		struct membank *bank = &mi->bank[mid];

		if (pfn < bank_pfn_start(bank))
			right = mid;
		else if (pfn >= bank_pfn_end(bank))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(pfn_valid);

static void arm_memory_present(struct meminfo *mi, int node)
{
}
#else
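/*
 * With sparsemem, register each bank's PFN range via memory_present()
 * so that sparsemem knows which sections contain memory.
 */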
static void arm_memory_present(struct meminfo *mi, int node)
{
	int i;
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
	}
}
#endif

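/*
 * Main ARM boot-time memory initialisation: locate the initrd, bring
 * up bootmem on every node, let sparsemem record the present
 * sections, then build each node's zones and set the global memory
 * watermarks.
 */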
void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;
	int node, initrd_node;

	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
	initrd_node = check_initrd(mi);

	max_low = max_high = 0;

	/*
	 * Run through each node initialising the bootmem allocator.
	 */
	for_each_node(node) {
		unsigned long node_low, node_high;

		find_node_limits(node, mi, &min, &node_low, &node_high);

		if (node_low > max_low)
			max_low = node_low;
		if (node_high > max_high)
			max_high = node_high;

		/*
		 * If there is no memory in this node, ignore it.
		 * (We can't have nodes which have no lowmem)
		 */
		if (node_low == 0)
			continue;

		bootmem_init_node(node, mi, min, node_low);

		/*
		 * Reserve any special node zero regions.
		 */
		if (node == 0)
			reserve_node_zero(NODE_DATA(node));

		/*
		 * If the initrd is in this node, reserve its memory.
		 */
		if (node == initrd_node)
			bootmem_reserve_initrd(node);

		/*
		 * Sparsemem tries to allocate bootmem in memory_present(),
		 * so must be done after the fixed reservations
		 */
		arm_memory_present(mi, node);
	}

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free memory in each node - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	for_each_node(node)
		bootmem_free_node(node, mi);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
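/*
 * Hand the pages in [pfn, end) back to the page allocator and return
 * how many were freed; "s", when non-NULL, names the region in the
 * log message.
 */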
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = bank_pfn_end(bank);
	}
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i, node;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#endif

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		free_unused_memmap_node(node, &meminfo);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_online_node(node) {
		for_each_nodebank (i, &meminfo, node) {
			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
				totalhigh_pages += free_area(start, end, NULL);
		}
	}
	totalram_pages += totalhigh_pages;
#endif

	reserved_pages = free_pages = 0;

	for_each_online_node(node) {
		for_each_nodebank(i, &meminfo, node) {
			struct membank *bank = &meminfo.bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = pfn_to_page(pfn1);
			end = pfn_to_page(pfn2 - 1) + 1;

			do {
				if (PageReserved(page))
					reserved_pages++;
				else if (!page_count(page))
					free_pages++;
				page++;
			} while (page < end);
		}
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

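	/*
	 * Helper macros for the layout dump below: each expands to a
	 * region's base, its top, and its size in kB (MLK), in MB (MLM),
	 * or in kB rounded up (MLK_ROUNDUP).
	 */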
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_data, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

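/*
 * Called once boot is complete: return the TCM link area (when
 * configured) and the kernel's __init text/data to the page
 * allocator.  The Integrator platforms are deliberately excluded
 * from freeing their init memory.
 */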
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}
693
694#ifdef CONFIG_BLK_DEV_INITRD
695
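/* Set via the "keepinitrd" boot option to skip freeing the initrd. */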
static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif