mm: merge populate and nopage into fault (fixes nonlinear)
/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
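
/*
 * This file provides install_page() and install_file_pte(), which wire
 * a file page or a "file pte" directly into a process pagetable, and
 * sys_remap_file_pages(), which builds nonlinear mappings out of file
 * ptes via populate_range().
 */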

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

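/*
 * Clear the pte at @addr so a new entry can be installed: a present
 * pte has its page unmapped and released (with any dirty bit moved to
 * the page), a swap pte drops its swap reference, and a file pte is
 * simply cleared. Returns nonzero if a real page was unmapped, so the
 * caller knows whether the file_rss counter needs adjusting.
 */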
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page = NULL;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page, vma);
			page_cache_release(page);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
	return !!page;
}

/*
 * Install a file page at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pte_t pte_val;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto unlock;
	err = -ENOMEM;
	/* Refuse pages whose mapcount is close to overflowing. */
	if (page_mapcount(page) > INT_MAX/2)
		goto unlock;

	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
		inc_mm_counter(mm, file_rss);

	flush_icache_page(vma, page);
	pte_val = mk_pte(page, prot);
	set_pte_at(mm, addr, pte, pte_val);
	page_add_file_rmap(page);
	update_mmu_cache(vma, addr, pte_val);
	lazy_mmu_prot_update(pte_val);
	err = 0;
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return err;
}
EXPORT_SYMBOL(install_page);

/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
		update_hiwater_rss(mm);
		dec_mm_counter(mm, file_rss);
	}

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

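/*
 * Install a file pte for each page in the range, advancing the file
 * offset in step. The caller guarantees that @size is a nonzero
 * multiple of PAGE_SIZE.
 */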
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *			  file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (currently ignored, see NOTE)
 * @pgoff: page of the backing store file to be mapped
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
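/*
 * Illustrative userspace sketch (not part of this file; assumes 4K
 * pages and the glibc remap_file_pages() wrapper): map four pages of a
 * file shared, then make the first page of the window show file page 3
 * instead of file page 0.
 *
 *	int fd = open("/tmp/data", O_RDWR);
 *	char *win = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	remap_file_pages(win, 4096, 0, 3, 0);
 */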
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
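	/*
	 * For scale (illustrative): i386 with 2-level pagetables defines
	 * PTE_FILE_MAX_BITS as 29, so at most 2^29 file pages - offsets
	 * up to 2TB with 4K pages - can be encoded in a file pte.
	 */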

	/*
	 * We need down_write() to change vma->vm_flags, but take the
	 * cheaper down_read() first and upgrade via the retry path only
	 * if the vma turns out to need conversion to VM_NONLINEAR.
	 */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if ((!vma->vm_ops || !vma->vm_ops->populate) &&
					!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}

	if (vma->vm_flags & VM_CAN_NONLINEAR) {
		err = populate_range(mm, vma, start, size, pgoff);
		if (!err && !(flags & MAP_NONBLOCK)) {
			if (unlikely(has_write_lock)) {
				downgrade_write(&mm->mmap_sem);
				has_write_lock = 0;
			}
			make_pages_present(start, start+size);
		}
	} else
		err = vma->vm_ops->populate(vma, start, size, vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}