[S390] take mmap_sem when walking guest page table
arch/s390/mm/pgtable.c

/*
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

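/*
 * Illustrative note (not part of the original file): booting with, say,
 * "vmalloc=512M" on the kernel command line sets
 * VMALLOC_START = (VMALLOC_END - 512 MB) & PAGE_MASK.
 */
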
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

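/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller on an mmap() path would upgrade before creating a mapping
 * above the current limit; the reachable limits are the asce_limit
 * values set above (2^31, 2^42 and 2^53 bytes).
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    crst_table_upgrade(mm, addr + len))
 *		return -ENOMEM;
 */
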
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

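/*
 * Illustrative sketch (not part of the original file): the typical life
 * cycle of a guest address space as a KVM-like caller might drive it;
 * everything other than gmap_alloc()/gmap_free() is elided.
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm);
 *	if (!gmap)
 *		return -ENOMEM;
 *	... map segments, run the guest, resolve gmap_fault()s ...
 *	gmap_free(gmap);
 */
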
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

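/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * interception loop brackets guest execution with gmap_enable() and
 * gmap_disable() so that S390_lowcore.gmap points at the right guest
 * address space while the guest runs.
 *
 *	gmap_enable(gmap);
 *	rc = run_guest(vcpu);	(hypothetical SIE entry helper)
 *	gmap_disable(gmap);
 */
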
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

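/*
 * Illustrative sketch (not part of the original file): mapping 16 MB of
 * a hypothetical parent-space area "uaddr" at guest address 0; from, to
 * and len must all be 1 MB segment aligned (PMD_SIZE).
 *
 *	rc = gmap_map_segment(gmap, uaddr, 0UL, 16UL << 20);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(gmap, 0UL, 16UL << 20);
 */
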
/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

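/*
 * Illustrative note (not part of the original file): this split is the
 * point of the patch above. A caller that already holds mmap_sem uses
 * __gmap_fault() directly; everybody else goes through gmap_fault(),
 * which takes mmap_sem around the walk so that find_vma() and the
 * parent page table stay stable:
 *
 *	down_read(&gmap->mm->mmap_sem);
 *	vmaddr = __gmap_fault(address, gmap);
 *	... use vmaddr while the lock is held ...
 *	up_read(&gmap->mm->mmap_sem);
 */
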
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	/* Tear down the page, not construct it: dtor, not ctor. */
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
				       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

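/*
 * Illustrative note (not part of the original file): atomic_xor_bits()
 * toggles allocation bits in page->_mapcount without taking a lock;
 * e.g. with old == 0x01 and bits == 0x02 the cmpxchg loop publishes
 * and returns 0x03.
 */
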
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

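/*
 * Illustrative note (not part of the original file): on 64 bit,
 * FRAG_MASK is 0x03 and a 4K page carries two 2K page tables. The low
 * nibble of page->_mapcount tracks the fragments in use: 0x01 after
 * the first allocation from a fresh page, 0x03 once both fragments are
 * handed out (the page then leaves pgtable_list); the upper nibble
 * marks fragments with a free pending in the RCU path below.
 */
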
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

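/*
 * Illustrative note (not part of the original file): page tables are at
 * least 1K aligned, so the low bits of the address handed to
 * tlb_remove_table() are free to carry a type tag, which
 * __tlb_remove_table() above decodes: 0 means a full crst table,
 * FRAG_MASK means a pgste page table, anything else is the fragment
 * bit shifted left by four.
 */
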
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If no, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

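/*
 * Illustrative sketch (not part of the original file): a KVM-like user
 * calls this once from its VM creation path, before any guest mappings
 * exist; the exact call site is hypothetical here.
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;	(mm already shared, or wrong amode)
 */
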
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */