[PATCH] unpaged: ZERO_PAGE in VM_UNPAGED
author Hugh Dickins <hugh@veritas.com>
Tue, 22 Nov 2005 05:32:19 +0000 (21:32 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
Tue, 22 Nov 2005 17:13:42 +0000 (09:13 -0800)
It's strange enough to be looking out for anonymous pages in VM_UNPAGED areas,
let's not insert the ZERO_PAGE there - though whether it would matter will
depend on what we decide about ZERO_PAGE refcounting.

But whereas do_anonymous_page may (exceptionally) be called on a VM_UNPAGED
area, do_no_page should never be: just BUG_ON.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
drivers/char/mem.c
mm/memory.c

index 91dd669273e0018aeeb95b02e18b4549e074a661..29c3b631445af851e9e90c93f7a25dbdef08ad7a 100644 (file)
@@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 
                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
-               if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+               if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
index 3666a4c6dd222b106650fe913e78bb70041c4f9e..d1f46f4e4c8a8594ffa649f15dd03d6cd6eda944 100644 (file)
@@ -1812,7 +1812,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
-       if (write_access) {
+       /*
+        * A VM_UNPAGED vma will normally be filled with present ptes
+        * by remap_pfn_range, and never arrive here; but it might have
+        * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
+        * It's weird enough handling anon pages in unpaged vmas, we do
+        * not want to worry about ZERO_PAGEs too (it may or may not
+        * matter if their counts wrap): just give them anon pages.
+        */
+
+       if (write_access || (vma->vm_flags & VM_UNPAGED)) {
                /* Allocate our own private page. */
                pte_unmap(page_table);
 
@@ -1887,6 +1896,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int anon = 0;
 
        pte_unmap(page_table);
+       BUG_ON(vma->vm_flags & VM_UNPAGED);
 
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
@@ -1962,7 +1972,7 @@ retry:
                        inc_mm_counter(mm, anon_rss);
                        lru_cache_add_active(new_page);
                        page_add_anon_rmap(new_page, vma, address);
-               } else if (!(vma->vm_flags & VM_UNPAGED)) {
+               } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(new_page);
                }