/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

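/* These limits bracket the sparc64 virtual-address hole (addresses
 * between them are unmappable), with a 4GB guard band (1UL << 32) on
 * either side of the hole, mirroring the non-hugepage search code.
 * The bottom-up search below first looks under the hole and, for
 * full 64-bit tasks, retries above it on failure.
 */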
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	/* Ask vm_unmapped_area() for a hugepage-aligned result. */
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		/* No room below the VA hole; retry above it. */
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

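/* Top-down variant, used only for 32-bit tasks whose mmap region
 * grows down from mm->mmap_base.  If the top-down search fails it
 * falls back to a bottom-up pass over the full 32-bit range; see
 * the comment inside the function.
 */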
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

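/* Common entry point for hugetlb mmap() placement: validate the
 * length, honor MAP_FIXED requests, try the caller's hint rounded
 * up to a hugepage boundary, and otherwise dispatch to whichever
 * search matches this mm's layout (bottom-up or top-down).
 */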
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

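/* sparc64 maps a huge page as a run of ordinary base-size PTEs
 * rather than as a single huge PMD or PUD entry, so allocation has
 * to walk the page table all the way down to the PTE level.
 */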
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

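/* Lookup-only counterpart of huge_pte_alloc(): walk the existing
 * tables and return NULL at the first missing level instead of
 * allocating.
 */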
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

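/* Hugetlb PMD sharing is an optimization some architectures
 * provide; it is not implemented here, so there is never anything
 * to unshare.
 */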
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

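/* Fan the huge mapping out into one PTE per base page.  The
 * huge_pte_count bookkeeping tracks how many huge mappings this mm
 * holds (presumably consulted when sizing the hugepage TSB for the
 * address space).
 */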
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		/* Advance the physical address by one base page too. */
		pte_val(entry) += PAGE_SIZE;
	}
}

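/* Tear-down counterpart of set_huge_pte_at(): clear every sub-PTE
 * of the huge mapping and hand back the original leading PTE so
 * the caller can flush and release the page.
 */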
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

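/* Since huge pages live at the PTE level here, the generic
 * follow-page paths never encounter huge PMDs or PUDs; these stubs
 * record that fact.
 */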
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}