/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

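/*
 * Order (log2) of the huge page size in use.  Defaults to
 * HPAGE_SHIFT_DEFAULT and may be overridden at boot via the
 * "hugepagesz=" parameter handled by hugetlb_setup_sz() below.
 */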
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

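/*
 * Walk and, where needed, allocate the pgd/pud/pmd/pte levels covering a
 * huge page address.  The address is first passed through
 * htlbpage_to_page() so ordinary PAGE_SIZE page tables can be used for
 * the huge page region.
 */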
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

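/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * tables for the (rescaled) address and return the pte, or NULL if any
 * level is not present.
 */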
static pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

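/* Mark a huge pte as present. */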
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

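/*
 * Build a pte for @page with protections derived from the vma and install
 * it at @page_table, bumping the mm's rss by the number of base pages
 * covered by one huge page.
 */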
static void
set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
	      struct page *page, pte_t * page_table, int write_access)
{
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
	if (write_access) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);
	set_pte(page_table, entry);
	return;
}
/*
 * Check that addr and len are properly huge page aligned and that addr
 * lies within the huge page region.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return -EINVAL;

	return 0;
}

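/*
 * Duplicate the huge page mappings of @vma from @src into @dst at fork
 * time: for each huge page, allocate a destination pte, take a reference
 * on the shared page and copy the source pte.
 */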
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_pte(dst_pte, entry);
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;
nomem:
	return -ENOMEM;
}

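/*
 * get_user_pages() back end for huge page vmas: record up to *length base
 * pages (and their vma) starting at *st into @pages/@vmas, updating *st
 * and *length as pages are gathered, and return the updated array index.
 */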
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *st, int *length, int i)
{
	pte_t *ptep, pte;
	unsigned long start = *st;
	unsigned long pstart;
	int len = *length;
	struct page *page;

	do {
		pstart = start & HPAGE_MASK;
		ptep = huge_pte_offset(mm, start);
		pte = *ptep;

back1:
		page = pte_page(pte);
		if (pages) {
			page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
			get_page(page);
			pages[i] = page;
		}
		if (vmas)
			vmas[i] = vma;
		i++;
		len--;
		start += PAGE_SIZE;
		if (((start & HPAGE_MASK) == pstart) && len &&
		    (start < vma->vm_end))
			goto back1;
	} while (len && start < vma->vm_end);
	*length = len;
	*st = start;
	return i;
}

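/*
 * follow_page() equivalent for addresses in the huge page region: return
 * the base page backing @addr, NULL if no huge pte is mapped there, or
 * ERR_PTR(-EINVAL) if @addr is outside REGION_HPAGE.
 */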
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
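
/*
 * ia64 huge pages live in their own region and are resolved through
 * follow_huge_addr() above rather than through huge pmd entries, so the
 * pmd-based helpers are stubs.
 */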
int pmd_huge(pmd_t pmd)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

/*
 * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
 * are hugetlb region specific.
 */
void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
	unsigned long start, unsigned long end)
{
	unsigned long first = start & HUGETLB_PGDIR_MASK;
	unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
	struct mm_struct *mm = tlb->mm;

	if (!prev) {
		prev = mm->mmap;
		if (!prev)
			goto no_mmaps;
		if (prev->vm_end > start) {
			if (last > prev->vm_start)
				last = prev->vm_start;
			goto no_mmaps;
		}
	}
	for (;;) {
		struct vm_area_struct *next = prev->vm_next;

		if (next) {
			if (next->vm_start < start) {
				prev = next;
				continue;
			}
			if (last > next->vm_start)
				last = next->vm_start;
		}
		if (prev->vm_end > first)
			first = prev->vm_end;
		break;
	}
no_mmaps:
	if (last < first)	/* for arches with discontiguous pgd indices */
		return;
	clear_page_range(tlb, first, last);
}

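/*
 * Tear down the huge page mappings in [start, end): drop the page
 * references, clear the ptes, lower the rss count and flush the TLB for
 * the range.
 */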
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		pte_clear(mm, address, pte);
	}
	add_mm_counter(mm, rss, - ((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}

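/*
 * Populate the whole vma at mmap() time: for each huge page sized slot,
 * allocate page tables, look the page up in the page cache (allocating
 * and inserting a fresh huge page if needed, with quota charged against
 * the filesystem) and install the pte.
 */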
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				page_cache_release(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

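/*
 * Arch-specific get_unmapped_area() for hugetlb mappings: first-fit
 * search for a free, huge page aligned range inside the dedicated huge
 * page region.
 */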
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	/* This code assumes that REGION_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

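/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be a
 * power of two supported by the CPU's TLB (as reported by PAL), larger
 * than PAGE_SIZE and small enough for the page allocator; otherwise the
 * default huge page size is kept.
 */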
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || (size & (size-1)) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override its huge page region register here
	 * with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);