[S390] Cleanup page table definitions.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] arch/s390/mm/vmem.c
/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

unsigned long vmalloc_end;
EXPORT_SYMBOL(vmalloc_end);

static struct page *vmem_map;
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

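/*
 * s390 provides its own memmap_init() because physical memory may
 * consist of several chunks with holes in between; only the struct
 * page ranges that cover actual memory are initialized. The window
 * for each chunk is clamped to [start, end) and then widened to full
 * pages of the mem_map array, so that a mem_map page straddling a
 * chunk boundary is still initialized completely.
 */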
void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}

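/*
 * Allocation helper that works both before and after the slab
 * allocator is up: early callers are served from bootmem, later
 * callers from the buddy allocator.
 */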
static void __init_refok *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

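/*
 * With 64 bit kernels a segment table (the pmd level) occupies four
 * consecutive pages (2048 entries of 8 bytes each), hence the order-2
 * allocation and the clear_table() over PAGE_SIZE*4. With 31 bit
 * kernels the pmd level is folded into the pgd, so nothing needs to
 * be allocated and NULL is returned.
 */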
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
#endif
	return pmd;
}

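/*
 * A page table is initialized with the invalid _PAGE_TYPE_EMPTY
 * pattern rather than with zeroes, since an all-zero entry would not
 * be an invalid pte on s390.
 */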
static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte = vmem_alloc_pages(0);

	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
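/*
 * The range is walked one page at a time: missing segment and page
 * tables are allocated on the fly, then a pte mapping the virtual
 * address to the identical physical address is installed. A minimal
 * usage sketch with hypothetical values, making a 1 MB range starting
 * at 0x20000000 addressable through the kernel 1:1 mapping:
 *
 *	rc = vmem_add_range(0x20000000UL, 0x100000UL);
 */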
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
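/*
 * The page tables themselves stay allocated; the ptes are merely
 * reset to the invalid _PAGE_TYPE_EMPTY pattern and the TLB is
 * flushed for the whole range.
 */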
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir))
			continue;
		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
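/*
 * vmem_map is a virtually contiguous struct page array spanning all
 * of memory; only the parts that describe memory which is actually
 * present get backed with real pages here. The struct page range for
 * the new memory is computed first, then the covering virtual range
 * is mapped page by page, allocating a fresh backing page wherever
 * no pte exists yet.
 */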
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	map_start = vmem_map + PFN_DOWN(start);
	map_end = vmem_map + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

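/*
 * Set up both the 1:1 mapping and the backing mem_map entries for a
 * new memory range.
 */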
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_range(start, size);
	if (ret)
		return ret;
	return vmem_add_mem_map(start, size);
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
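/*
 * Two segments overlap unless one ends at or before the start of the
 * other, which is what the two "continue" checks below test; a range
 * is also rejected if it reaches beyond max_pfn or wraps around the
 * end of the address space.
 */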
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

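/*
 * Make a new shared memory segment (on s390 typically a DCSS, handled
 * by the extmem code) usable: register it in the segment list, add it
 * to the 1:1 mapping, back its mem_map range and initialize the new
 * struct pages as reserved. start and size must match the values
 * passed to the corresponding remove_shared_memory() call.
 */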
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 */
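/*
 * The virtual mem_map array is carved out of the top of the vmalloc
 * area: its size is the MAX_ORDER aligned number of page frames times
 * sizeof(struct page), and vmalloc_end is lowered accordingly. As a
 * rough example with hypothetical numbers, 2 GB of memory and a
 * 64 byte struct page need 2G / 4K * 64 = 32 MB of virtual mem_map.
 */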
void __init vmem_map_init(void)
{
	unsigned long map_size;
	int i;

	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
	vmem_map = (struct page *) vmalloc_end;
	NODE_DATA(0)->node_mem_map = vmem_map;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
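/*
 * Runs as a core_initcall, i.e. after the slab allocator has been set
 * up, which is why plain kzalloc() can be used here.
 */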
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);