android_kernel_alcatel_ttab.git (mt8127): arch/blackfin/mm/init.c (Blackfin architecture)
/*
 * File:         arch/blackfin/mm/init.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */

#include <linux/swap.h>
#include <linux/bootmem.h>
#include <asm/bfin-global.h>
#include <asm/uaccess.h>
#include <asm/l1layout.h>
#include "blackfin_sram.h"
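/*
 * blackfin_sram.h is the local header that is assumed to declare the
 * L1 SRAM hooks (l1sram_init(), l1_data_sram_init(),
 * l1_inst_sram_init(), l1sram_alloc()) used by mem_init() below.
 */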

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
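
/*
 * Note: on no-MMU ports such as this one, ZERO_PAGE() is normally
 * defined (in <asm/pgtable.h>) as virt_to_page(empty_zero_page), so
 * the page allocated and zeroed in paging_init() below is the one
 * handed out for zero-filled and COW mappings.
 */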
static unsigned long empty_bad_page_table;

static unsigned long empty_bad_page;

unsigned long empty_zero_page;

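/*
 * Dump a summary of memory usage to the console: total, reserved,
 * swap-cached, free and shared page counts, gathered by walking
 * mem_map[].  Typically reached via the SysRq 'm' handler and the
 * out-of-memory paths.
 */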
void show_mem(void)
{
        unsigned long i;
        int free = 0, total = 0, reserved = 0, shared = 0;

        int cached = 0;
        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map + i))
                        reserved++;
                else if (PageSwapCache(mem_map + i))
                        cached++;
                else if (!page_count(mem_map + i))
                        free++;
                else
                        shared += page_count(mem_map + i) - 1;
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d free pages\n", free);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);
}

/*
 * paging_init() continues the virtual memory environment setup that
 * was begun by the code in arch/head.S.  It allocates the bad page,
 * bad page table and zero page from bootmem, and then hands the
 * available memory to the page allocator via free_area_init().
 */
void paging_init(void)
{
        /*
         * make sure start_mem is page aligned, otherwise bootmem and
         * page_alloc get different views of the world
         */
        unsigned long end_mem = memory_end & PAGE_MASK;

        pr_debug("start_mem is %#lx virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem);

        /*
         * initialize the bad page table and bad page to point
         * to a couple of allocated pages
         */
        empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        memset((void *)empty_zero_page, 0, PAGE_SIZE);

        /*
         * Allow kernel-mode accesses to the whole address space for
         * the initial thread (set_fs() records this address-space
         * limit).
         */
        set_fs(KERNEL_DS);

        pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
                PAGE_ALIGN(memory_start), end_mem);

        {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, };

                zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = 0;
#endif
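                /*
                 * Everything below end_mem lands in ZONE_NORMAL on this
                 * flat-memory port; (end_mem - PAGE_OFFSET) >> PAGE_SHIFT
                 * is the number of page frames handed to the allocator.
                 */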
                free_area_init(zones_size);
        }
}

void mem_init(void)
{
        unsigned int codek = 0, datak = 0, initk = 0;
        unsigned long tmp;
        unsigned int len = _ramend - _rambase;
        unsigned long start_mem = memory_start;
        unsigned long end_mem = memory_end;

        end_mem &= PAGE_MASK;
        high_memory = (void *)end_mem;

        start_mem = PAGE_ALIGN(start_mem);
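        /*
         * MAP_NR() turns the top of usable memory into a page-frame
         * count (on this port it is essentially
         * (addr - PAGE_OFFSET) >> PAGE_SHIFT), which becomes both
         * max_mapnr and num_physpages.
         */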
        max_mapnr = num_physpages = MAP_NR(high_memory);
        printk(KERN_INFO "Physical pages: %lx\n", num_physpages);

        /* This will put all memory onto the freelists. */
        totalram_pages = free_all_bootmem();

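        /*
         * Section sizes in KiB, from the linker-provided symbols:
         * kernel text (_stext.._etext), bss (__bss_start..__bss_stop,
         * reported as "data" below) and the discardable init section
         * (__init_begin..__init_end).
         */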
        codek = (_etext - _stext) >> 10;
        datak = (__bss_stop - __bss_start) >> 10;
        initk = (__init_end - __init_begin) >> 10;

        tmp = nr_free_pages() << PAGE_SHIFT;
        printk(KERN_INFO
                "Memory available: %luk/%uk RAM, (%uk init code, %uk kernel code, %uk data, %uk dma)\n",
                tmp >> 10, len >> 10, initk, codek, datak, DMA_UNCACHED_REGION >> 10);

        /* Initialize the blackfin L1 Memory. */
        l1sram_init();
        l1_data_sram_init();
        l1_inst_sram_init();

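        /*
         * L1_SCRATCH_TASK_INFO comes from <asm/l1layout.h> (included
         * above) and is understood to be the fixed spot in L1
         * scratchpad SRAM reserved for struct l1_scratch_task_info,
         * so the very first l1sram_alloc() must return exactly that
         * address.
         */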
        /* Allocate this once; never free it.  We assume this gives us a
           pointer to the start of L1 scratchpad memory; panic if it
           doesn't.  */
        tmp = (unsigned long)l1sram_alloc(sizeof(struct l1_scratch_task_info));
        if (tmp != (unsigned long)L1_SCRATCH_TASK_INFO) {
                printk(KERN_EMERG "mem_init(): Did not get the right address from l1sram_alloc: %08lx != %08lx\n",
                        tmp, (unsigned long)L1_SCRATCH_TASK_INFO);
                panic("No L1, time to give up\n");
        }
}

#ifdef CONFIG_BLK_DEV_INITRD
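/*
 * Hand the pages that held the initial ramdisk back to the page
 * allocator once its contents are no longer needed: clear the
 * reserved bit, reset the reference count and free each page,
 * bumping totalram_pages as we go.
 */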
void free_initrd_mem(unsigned long start, unsigned long end)
{
        int pages = 0;
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
                pages++;
        }
        printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
                pages << (PAGE_SHIFT - 10));
}
#endif

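/*
 * Release the kernel's __init text and data once boot has finished.
 * The body is compiled only for CONFIG_RAMKERNEL, presumably because
 * a ROM/XIP kernel's init sections do not sit in reclaimable RAM.
 */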
void free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
        unsigned long addr;
        /*
         * The following works even if these sections are not page
         * aligned.
         */
        addr = PAGE_ALIGN((unsigned long)(__init_begin));
        /*
         * Stop before a trailing partial page so we never free a page
         * that is only partly init data.
         */
        for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
             addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_NOTICE
                "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
                (addr - PAGE_ALIGN((long)__init_begin)) >> 10,
                (int)(PAGE_ALIGN((unsigned long)(__init_begin))),
                (int)(addr - PAGE_SIZE));
#endif
}