/*
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

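/* Resize the vmalloc area via the "vmalloc=" early kernel parameter. */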
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

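/*
 * Allocate one full region/segment table: 2 pages on 31-bit,
 * 4 pages (2048 eight-byte entries) on 64-bit, see ALLOC_ORDER.
 */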
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
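/*
 * Grow the address space of an mm by stacking an additional region
 * table on top of the current top-level table: 2 GB -> 4 TB -> 8 PB.
 */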
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

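/*
 * Shrink the address space of an mm by removing region table levels
 * from the top until the new limit is reached.
 */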
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

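/*
 * Remove the reverse mapping of a gmap segment table entry and reset
 * the entry to the invalid state, preserving the parent address.
 */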
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	gmap_flush_tlb(gmap);

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	/* Load primary space page table origin. */
	S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
				 _ASCE_USER_BITS | __pa(gmap->table);
	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	/* Load primary space page table origin. */
	S390_lowcore.user_asce =
		gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

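/*
 * Allocate and initialize a new lower-level table and link it into
 * the given (invalid) gmap table entry.
 */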
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	down_read(&gmap->mm->mmap_sem);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

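/**
 * gmap_fault - resolve a fault on a guest address
 * @address: address in the guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the address in the parent address space that backs @address,
 * or a negative error value if no mapping exists. On first access the
 * gmap segment table entry is linked to the parent mm's page table.
 */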
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);

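/*
 * Called when a page table is removed from the parent mm: detach all
 * gmap segment table entries that point to it and flush if necessary.
 */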
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

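/*
 * Allocate a full 4K page for a 2K page table with the page status
 * table (pgste) extension in the upper half of the page.
 */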
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
				       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

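/*
 * Atomically toggle the given bits in *v with a cmpxchg loop and
 * return the resulting value.
 */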
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

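/*
 * Defer the release of a page table until after the TLB has been
 * flushed; the fragment bits pending removal are encoded in the low
 * bits of the table address handed to tlb_remove_table().
 */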
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long) _table & PAGE_MASK);
	unsigned type = (unsigned long) _table & ~PAGE_MASK;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * Switch on pgstes for the current userspace process (needed for KVM).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot run SIE. */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done. */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes. */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again whether something happened. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
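/*
 * Test whether a page is currently mapped in the kernel address space
 * by probing it with the "load real address" (lra) instruction and
 * checking the resulting condition code.
 */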
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC && CONFIG_HIBERNATION */