/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

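/*
 * Tear down whatever currently occupies one pte slot: a present page
 * (dirtying, unmapping and releasing it, and dropping the rss count),
 * a swap entry, or nothing at all. Called with page_table_lock held.
 */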
static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_none(pte))
                return;
        if (pte_present(pte)) {
                unsigned long pfn = pte_pfn(pte);

                flush_cache_page(vma, addr, pfn);
                pte = ptep_clear_flush(vma, addr, ptep);
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
                        if (!PageReserved(page)) {
                                if (pte_dirty(pte))
                                        set_page_dirty(page);
                                page_remove_rmap(page);
                                page_cache_release(page);
                                dec_mm_counter(mm, rss);
                        }
                }
        } else {
                if (!pte_file(pte))
                        free_swap_and_cache(pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
}

/*
 * Install a file page at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, struct page *page, pgprot_t prot)
{
        struct inode *inode;
        pgoff_t size;
        int err = -ENOMEM;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;
        pte_t pte_val;

        pgd = pgd_offset(mm, addr);
        spin_lock(&mm->page_table_lock);

        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                goto err_unlock;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                goto err_unlock;

        pte = pte_alloc_map(mm, pmd, addr);
        if (!pte)
                goto err_unlock;

        /*
         * This page may have been truncated. Tell the
         * caller about it.
         */
        err = -EINVAL;
        inode = vma->vm_file->f_mapping->host;
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (!page->mapping || page->index >= size)
                goto err_unlock;
        err = -ENOMEM;
        if (page_mapcount(page) > INT_MAX/2)
                goto err_unlock;

        zap_pte(mm, vma, addr, pte);

        inc_mm_counter(mm, rss);
        flush_icache_page(vma, page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
        page_add_file_rmap(page);
        pte_val = *pte;
        pte_unmap(pte);
        update_mmu_cache(vma, addr, pte_val);

        err = 0;
err_unlock:
        spin_unlock(&mm->page_table_lock);
        return err;
}
EXPORT_SYMBOL(install_page);


/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
        int err = -ENOMEM;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;
        pte_t pte_val;

        pgd = pgd_offset(mm, addr);
        spin_lock(&mm->page_table_lock);

        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                goto err_unlock;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                goto err_unlock;

        pte = pte_alloc_map(mm, pmd, addr);
        if (!pte)
                goto err_unlock;

        zap_pte(mm, vma, addr, pte);

        set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
        pte_val = *pte;
        pte_unmap(pte);
        update_mmu_cache(vma, addr, pte_val);
        spin_unlock(&mm->page_table_lock);
        return 0;

err_unlock:
        spin_unlock(&mm->page_table_lock);
        return err;
}

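/*
 * Illustrative sketch (not part of this file) of how a ->populate
 * implementation such as filemap_populate() is expected to drive the
 * two helpers above: for each page of the window it either installs
 * the resident page itself, or, when nonblocking and the page is not
 * resident, merely records the file offset in a file pte. The helper
 * find_page_or_read_it() is a made-up name standing in for the page
 * cache lookup:
 *
 *      for (; len; len -= PAGE_SIZE, addr += PAGE_SIZE, pgoff++) {
 *              struct page *page = find_page_or_read_it(file, pgoff,
 *                                                       nonblock);
 *              if (page)
 *                      err = install_page(mm, vma, addr, page, prot);
 *              else if (nonblock)
 *                      err = install_file_pte(mm, vma, addr, pgoff, prot);
 *              else
 *                      return -ENOMEM;
 *              if (err)
 *                      return err;
 *      }
 */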

/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
        unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
        unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;

        if (__prot)
                return err;
        /*
         * Sanitize the syscall parameters:
         */
        start = start & PAGE_MASK;
        size = size & PAGE_MASK;

        /* Does the address range wrap, or is the span zero-sized? */
        if (start + size <= start)
                return err;

        /* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
                return err;
#endif
        /*
         * We need down_write() to change vma->vm_flags, so start with
         * down_read() and upgrade to the write lock only if that turns
         * out to be necessary (see the retry path below).
         */
        down_read(&mm->mmap_sem);
retry:
        vma = find_vma(mm, start);

        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
         * the single existing vma. vm_private_data is used as a
         * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
         * or VM_LOCKED, but VM_LOCKED could be revoked later on).
         */
        if (vma && (vma->vm_flags & VM_SHARED) &&
                (!vma->vm_private_data ||
                        (vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
                vma->vm_ops && vma->vm_ops->populate &&
                        end > start && start >= vma->vm_start &&
                                end <= vma->vm_end) {

                /* Must set VM_NONLINEAR before any pages are populated. */
                if (pgoff != linear_page_index(vma, start) &&
                        !(vma->vm_flags & VM_NONLINEAR)) {
                        if (!has_write_lock) {
                                up_read(&mm->mmap_sem);
                                down_write(&mm->mmap_sem);
                                has_write_lock = 1;
                                goto retry;
                        }
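                        /*
                         * Move the vma from the address_space's prio tree
                         * to its nonlinear list: page offsets in this vma
                         * no longer map linearly to file offsets, so rmap
                         * must find it via i_mmap_nonlinear instead.
                         */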
                        mapping = vma->vm_file->f_mapping;
                        spin_lock(&mapping->i_mmap_lock);
                        flush_dcache_mmap_lock(mapping);
                        vma->vm_flags |= VM_NONLINEAR;
                        vma_prio_tree_remove(vma, &mapping->i_mmap);
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                        flush_dcache_mmap_unlock(mapping);
                        spin_unlock(&mapping->i_mmap_lock);
                }

                err = vma->vm_ops->populate(vma, start, size,
                                            vma->vm_page_prot,
                                            pgoff, flags & MAP_NONBLOCK);

                /*
                 * We can't clear VM_NONLINEAR because we'd have to do
                 * it after ->populate completes, and that would prevent
                 * downgrading the lock. (Locks can't be upgraded).
                 */
        }
        if (likely(!has_write_lock))
                up_read(&mm->mmap_sem);
        else
                up_write(&mm->mmap_sem);

        return err;
}

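/*
 * Illustrative userspace usage (not part of this file): the caller
 * first mmap()s a shared window of the file, then rearranges which
 * file pages back which window pages, without creating any new vmas.
 * Offsets below assume a 4096-byte page size; note that pgoff is in
 * units of pages and prot must currently be 0:
 *
 *      int fd = open("data.bin", O_RDWR);
 *      char *win = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *      // back the first window page with file page 3
 *      remap_file_pages(win, 4096, 0, 3, 0);
 */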