thp: mprotect: pass vma down to page table walkers

/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we
			 * know to be dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
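
/*
 * Note on the dirty_accountable path above: shared writable file
 * mappings that want dirty accounting are given a write-protected
 * vm_page_prot (see mprotect_fixup() below) so that the first write
 * faults and the page can be tracked as dirty.  For ptes that are
 * already dirty, pte_mkwrite() restores write access up front and a
 * pointless second fault is avoided.
 */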

static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(vma->vm_mm, pmd);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
				dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}
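
/*
 * Per the commit title, the walkers now take the vma rather than the
 * bare mm.  At this level only vma->vm_mm is used so far: any
 * transparent huge pmd is simply split via split_huge_page_pmd()
 * before the pte-level walk.  Having the vma available here is what
 * later huge-pmd handling is expected to build on.
 */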

static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(vma, pud, addr, next, newprot,
				dirty_accountable);
	} while (pud++, addr = next, addr != end);
}
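
/*
 * A worked example of the p?d_addr_end() clamping used at every
 * walker level (assuming 2 MiB pmds, as on x86-64): with addr =
 * 0x1ff000 and end = 0x400000, pmd_addr_end() returns 0x200000, so
 * change_pte_range() stops at the pmd boundary and the next loop
 * iteration continues with a fresh pmd entry.
 */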

static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(vma, pgd, addr, next, newprot,
				dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}
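
/*
 * Note the bracketing in change_protection(): flush_cache_range()
 * runs before any pte is modified (needed for virtually indexed
 * caches), and one flush_tlb_range() covers the whole span at the
 * end, instead of flushing per pte.
 */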

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.  hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}
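
	/*
	 * Worked example for the rule above: making a 16-page private,
	 * read-only, not-yet-accounted region writable charges 16 pages
	 * against the overcommit limit (and can fail with -ENOMEM);
	 * making it read-only again later does not return the charge.
	 */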

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	mmu_notifier_invalidate_range_end(mm, start, end);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
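
/*
 * Resulting vma layout: when [start, end) falls strictly inside one
 * vma and no merge is possible, the two split_vma() calls leave up to
 * three vmas, with only the middle one carrying newflags.  If the new
 * flags match a neighbour, vma_merge() succeeds and the function goes
 * straight to the "success" label with no split at all.
 */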

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}
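
		/*
		 * Concretely (flag values from <linux/mm.h>): VM_READ is
		 * 0x1 and VM_MAYREAD is 0x10, so newflags >> 4 lines the
		 * VM_MAY* bits up with the VM_* bits.  Requesting
		 * VM_WRITE (0x2) on a vma lacking VM_MAYWRITE (0x20)
		 * leaves bit 1 set after the mask and fails with -EACCES.
		 */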

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
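
/*
 * Userspace view (a sketch, not part of this file): this syscall
 * backs the mprotect(2) wrapper, e.g.
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mprotect(buf, 4096, PROT_READ) < 0)
 *		perror("mprotect");
 *
 * start must be page-aligned (the "start & ~PAGE_MASK" check above),
 * while len is rounded up to a whole number of pages by PAGE_ALIGN().
 */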