#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_page_pmd_mm(walk->mm, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
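
/*
 * Illustrative sketch, not part of this file: as the comment in
 * walk_pmd_range() notes, a ->pmd_entry() handler has to recognize
 * pmd_trans_huge() pmds itself. The handler below is hypothetical and
 * kept under #if 0; a real handler would also take the appropriate
 * page table lock (e.g. via pmd_trans_huge_lock()) before relying on
 * the huge pmd staying stable.
 */
#if 0	/* example only */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	unsigned long *bytes_mapped = walk->private;

	if (pmd_trans_huge(*pmd)) {
		/* Account the whole huge pmd; there is no pte level below. */
		*bytes_mapped += end - addr;
		return 0;
	}
	/* Not huge: returning 0 lets the walker descend to the ptes. */
	return 0;
}
#endif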

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
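
/*
 * Illustrative sketch, not part of this file: a ->hugetlb_entry()
 * callback receives the huge pte, the huge page mask, and the range it
 * covers. This hypothetical handler (kept under #if 0) counts present
 * huge pages via a counter assumed to live in walk->private.
 */
#if 0	/* example only */
static int example_hugetlb_entry(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long next,
				 struct mm_walk *walk)
{
	unsigned long *nr_huge = walk->private;

	if (pte_present(*pte))
		(*nr_huge)++;	/* each entry maps (hmask + 1) bytes */
	return 0;
}
#endif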

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting a transparent huge page may
 * take the page table lock. And the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 *
 * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
 * is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma = NULL;

		next = pgd_addr_end(addr, end);

		/*
		 * This function was not intended to be vma based.
		 * But there are vma special cases to be handled:
		 * - hugetlb vma's
		 * - VM_PFNMAP vma's
		 */
		vma = find_vma(walk->mm, addr);
		if (vma) {
			/*
			 * There are no page structures backing a VM_PFNMAP
			 * range, so do not allow split_huge_page_pmd().
			 */
			if ((vma->vm_start <= addr) &&
			    (vma->vm_flags & VM_PFNMAP)) {
				next = vma->vm_end;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
			/*
			 * Handle hugetlb vma individually because pagetable
			 * walk for the hugetlb page is dependent on the
			 * architecture and we can't handle it in the same
			 * manner as non-huge pages.
			 */
			if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
			    is_vm_hugetlb_page(vma)) {
				if (vma->vm_end < next)
					next = vma->vm_end;
				/*
				 * Hugepage is very tightly coupled with vma,
				 * so walk through hugetlb entries within a
				 * given vma.
				 */
				err = walk_hugetlb_range(vma, addr, next, walk);
				if (err)
					break;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
		}

		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr < end);

	return err;
}
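
/*
 * Illustrative sketch, not part of this file: a minimal caller of
 * walk_page_range(). The function names and the counter in ->private
 * are hypothetical (kept under #if 0). Note the mmap_sem requirement
 * asserted by the VM_BUG_ON() above.
 */
#if 0	/* example only */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;	/* non-zero would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_pte_entry,
		.mm		= mm,
		.private	= &nr_present,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);
	return nr_present;
}
#endif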