[PATCH] mm: ptd_alloc take ptlock
/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

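/*
 * Tear down one pte under the page table lock.  A present page is
 * flushed and its rmap and page cache references dropped; a swap
 * entry has its swap slot freed; a file pte is simply cleared.
 * Returns nonzero iff a (valid) present page was unmapped, so the
 * caller knows whether to adjust the rss accounting.
 */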
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        struct page *page = NULL;

        if (pte_present(pte)) {
                unsigned long pfn = pte_pfn(pte);
                flush_cache_page(vma, addr, pfn);
                pte = ptep_clear_flush(vma, addr, ptep);
                if (unlikely(!pfn_valid(pfn))) {
                        print_bad_pte(vma, pte, addr);
                        goto out;
                }
                page = pfn_to_page(pfn);
                if (pte_dirty(pte))
                        set_page_dirty(page);
                page_remove_rmap(page);
                page_cache_release(page);
        } else {
                if (!pte_file(pte))
                        free_swap_and_cache(pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
out:
        return !!page;
}

/*
 * Install a file page to a given virtual memory address, release any
 * previously existing mapping.
 */
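/*
 * Note: pte_alloc_map_lock() below returns with the page table lock
 * held in *ptl, and pte_unmap_unlock() drops it again, so the pte
 * cannot change between the zap_pte() check and set_pte_at().
 */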
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, struct page *page, pgprot_t prot)
{
        struct inode *inode;
        pgoff_t size;
        int err = -ENOMEM;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;
        pte_t pte_val;
        spinlock_t *ptl;

        BUG_ON(vma->vm_flags & VM_RESERVED);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                goto out;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                goto out;
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                goto out;

        /*
         * This page may have been truncated. Tell the
         * caller about it.
         */
        err = -EINVAL;
        inode = vma->vm_file->f_mapping->host;
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (!page->mapping || page->index >= size)
                goto unlock;
        err = -ENOMEM;
        if (page_mapcount(page) > INT_MAX/2)
                goto unlock;

        if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
                inc_mm_counter(mm, file_rss);

        flush_icache_page(vma, page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
        page_add_file_rmap(page);
        pte_val = *pte;
        update_mmu_cache(vma, addr, pte_val);
        err = 0;
unlock:
        pte_unmap_unlock(pte, ptl);
out:
        return err;
}
EXPORT_SYMBOL(install_page);
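
/*
 * Illustrative sketch (not from this file): a filesystem's ->populate
 * method drives install_page()/install_file_pte() across the requested
 * range.  example_getpage() is a hypothetical stand-in for the real
 * page cache lookup (cf. filemap_getpage() in mm/filemap.c); len is
 * assumed page-aligned and error handling is abbreviated.
 */
static int example_populate(struct vm_area_struct *vma, unsigned long addr,
                unsigned long len, pgprot_t prot, unsigned long pgoff,
                int nonblock)
{
        struct mm_struct *mm = vma->vm_mm;
        int err;

        while (len) {
                /* hypothetical: may return NULL rather than block on IO */
                struct page *page = example_getpage(vma->vm_file, pgoff,
                                                    nonblock);
                if (page)
                        err = install_page(mm, vma, addr, page, prot);
                else if (vma->vm_flags & VM_NONLINEAR)
                        /* no IO now: a file pte lets the fault path
                         * read the page in later */
                        err = install_file_pte(mm, vma, addr, pgoff, prot);
                else
                        err = -ENOMEM;
                if (err)
                        return err;
                addr += PAGE_SIZE;
                pgoff++;
                len -= PAGE_SIZE;
        }
        return 0;
}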

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
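/*
 * A file pte is a not-present pte that encodes a file offset via
 * pgoff_to_pte() instead of referencing a page or swap entry.  When
 * the address is later faulted, the offset is decoded again (see
 * pte_to_pgoff()) and the page read in, which is what makes a
 * nonlinear mapping safe across swapout: only the offset lives in
 * the page table.
 */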
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
        int err = -ENOMEM;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;
        pte_t pte_val;
        spinlock_t *ptl;

        BUG_ON(vma->vm_flags & VM_RESERVED);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                goto out;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                goto out;
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                goto out;

        if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
                update_hiwater_rss(mm);
                dec_mm_counter(mm, file_rss);
        }

        set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
        pte_val = *pte;
        update_mmu_cache(vma, addr, pte_val);
        pte_unmap_unlock(pte, ptl);
        err = 0;
out:
        return err;
}

/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: page of the backing store file to be mapped
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.  (A userspace usage sketch follows the
 * function body below.)
 *
 * NOTE: the 'prot' parameter is currently ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
                unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
        unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;

        if (__prot)
                return err;
        /*
         * Sanitize the syscall parameters:
         */
        start = start & PAGE_MASK;
        size = size & PAGE_MASK;

        /* Does the address range wrap, or is the span zero-sized? */
        if (start + size <= start)
                return err;

        /* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
                return err;
#endif

        /*
         * We need down_write() to change vma->vm_flags, but start with
         * down_read(): if an upgrade turns out to be necessary we drop
         * the read lock, take the write lock and retry, because an
         * rwsem cannot be upgraded in place.
         */
        down_read(&mm->mmap_sem);
retry:
        vma = find_vma(mm, start);

        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
         * the single existing vma. vm_private_data is used as a
         * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
         * or VM_LOCKED, but VM_LOCKED could be revoked later on).
         */
        if (vma && (vma->vm_flags & VM_SHARED) &&
                (!vma->vm_private_data ||
                        (vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
                vma->vm_ops && vma->vm_ops->populate &&
                        end > start && start >= vma->vm_start &&
                        end <= vma->vm_end) {

                /* Must set VM_NONLINEAR before any pages are populated. */
                if (pgoff != linear_page_index(vma, start) &&
                        !(vma->vm_flags & VM_NONLINEAR)) {
                        if (!has_write_lock) {
                                up_read(&mm->mmap_sem);
                                down_write(&mm->mmap_sem);
                                has_write_lock = 1;
                                goto retry;
                        }
                        mapping = vma->vm_file->f_mapping;
                        spin_lock(&mapping->i_mmap_lock);
                        flush_dcache_mmap_lock(mapping);
                        vma->vm_flags |= VM_NONLINEAR;
                        vma_prio_tree_remove(vma, &mapping->i_mmap);
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                        flush_dcache_mmap_unlock(mapping);
                        spin_unlock(&mapping->i_mmap_lock);
                }

                err = vma->vm_ops->populate(vma, start, size,
                                            vma->vm_page_prot,
                                            pgoff, flags & MAP_NONBLOCK);

                /*
                 * We can't clear VM_NONLINEAR because we'd have to do
                 * it after ->populate completes, and that would prevent
                 * downgrading the lock.  (Locks can't be upgraded).
                 */
        }
        if (likely(!has_write_lock))
                up_read(&mm->mmap_sem);
        else
                up_write(&mm->mmap_sem);

        return err;
}
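
/*
 * Usage sketch (userspace, illustrative -- not part of this file):
 * mmap() a window of a file, then rearrange which file page backs
 * which part of the window without creating new vmas.  Note that
 * pgoff is in units of pages, prot must be 0 (see the NOTE above),
 * and the example assumes a file at least three pages long.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        int fd = open("datafile", O_RDWR);
        char *win;

        if (fd < 0)
                return 1;
        win = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (win == MAP_FAILED)
                return 1;
        /* back the first page of the window with the file's third page */
        if (remap_file_pages(win, psz, 0, 2, 0) != 0)
                perror("remap_file_pages");
        munmap(win, 3 * psz);
        close(fd);
        return 0;
}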