/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

unsigned long vmalloc_end;
EXPORT_SYMBOL(vmalloc_end);

static struct page *vmem_map;
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

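/*
 * vmem_map is the base of the virtually mapped mem_map array; it is
 * carved out of the top of the vmalloc area by vmem_map_init() below.
 * mem_segs lists all known memory segments (boot-time memory chunks as
 * well as segments added later via add_shared_memory()) and is used by
 * insert_memory_segment() to reject overlapping additions.
 */
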
void memmap_init(unsigned long size, int nid, unsigned long zone,
		 unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start));
	}
}
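
/*
 * Illustration of the rounding in memmap_init() (numbers are only an
 * example, not authoritative): assuming PAGE_SIZE is 4096 and
 * sizeof(struct page) is 64, one page of the memmap holds 64 struct
 * pages.  If a memory chunk begins in the middle of such a memmap page,
 * map_start is pulled back towards that page boundary and map_end is
 * pushed forward to the next one, so the struct pages in every memmap
 * page that is actually backed for this chunk get initialized, while
 * unbacked memmap pages are left untouched.
 */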

static inline void *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
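
/*
 * The page table allocators below go through vmem_alloc_pages() so they
 * work both at boot time, before the slab allocator is up (bootmem
 * path), and later at runtime when segments are added via
 * add_shared_memory() (GFP_KERNEL path).
 */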

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;
	int i;

	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
	if (!pmd)
		return NULL;
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte;
	pte_t empty_pte;
	int i;

	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
	if (!pte)
		return NULL;
	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(pte + i, empty_pte);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte(pt_dir, pte);
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}
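
/*
 * Illustrative example for vmem_add_range() (numbers are only an
 * example): adding a 1 MB range starting at physical address 0x400000
 * walks 256 pages of 4 KB each and installs an identity pte
 * (virtual == physical, PAGE_KERNEL protection) for every one,
 * allocating intermediate pmd and pte tables on demand.
 */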

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir))
			continue;
		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		set_pte(pt_dir, pte);
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	map_start = vmem_map + PFN_DOWN(start);
	map_end = vmem_map + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			set_pte(pt_dir, pte);
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}
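
/*
 * Illustrative example for vmem_add_mem_map() (numbers are assumptions):
 * with the memmap based at vmem_map, the struct page for pfn N lives at
 * vmem_map + N.  For a chunk covering pfns 0x10000..0x10fff, only the
 * memmap pages holding those 4096 struct page entries get backed with
 * freshly allocated pages here; memmap pages for pfn ranges without
 * memory behind them stay unbacked.
 */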

static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_range(start, size);
	if (ret)
		return ret;
	return vmem_add_mem_map(start, size);
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
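
/*
 * Example of the overlap check in insert_memory_segment() (addresses are
 * illustrative only): with an existing segment [0x10000000, 0x20000000),
 * a new segment [0x1f000000, 0x21000000) is rejected with -ENOSPC, since
 * its start lies below the existing end and its end above the existing
 * start, while [0x20000000, 0x30000000) is accepted.
 */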

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
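
/*
 * Typical use (illustrative only; the callers live elsewhere, e.g. the
 * DCSS/extmem code): a shared memory segment at physical address "addr"
 * with length "len" would be brought up and torn down roughly like this:
 *
 *	ret = add_shared_memory(addr, len);
 *	if (ret)
 *		return ret;
 *	...
 *	remove_shared_memory(addr, len);
 *
 * The same start/size pair must be passed to remove_shared_memory(),
 * since the segment is looked up by exact match.
 */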

/*
 * map whole physical memory to virtual memory (identity mapping)
 */
void __init vmem_map_init(void)
{
	unsigned long map_size;
	int i;

	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
	vmem_map = (struct page *) vmalloc_end;
	NODE_DATA(0)->node_mem_map = vmem_map;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}
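
/*
 * Worked example of the sizing in vmem_map_init() (numbers are
 * illustrative assumptions): with 2 GB of memory, 4 KB pages and a
 * sizeof(struct page) of 64 bytes, max_low_pfn is 524288 and map_size
 * comes to roughly 32 MB.  The virtual memmap is then placed in the
 * topmost 32 MB below VMALLOC_END_INIT and vmalloc_end is lowered
 * accordingly, shrinking the vmalloc area by the size of the memmap.
 */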

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);