[SPARC64]: Randomize mm->mmap_base when PF_RANDOMIZE is set.
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

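/* The TSB is a direct-mapped, power-of-two sized table of 16-byte
 * {tag, pte} pairs, indexed by virtual page number.  Illustrative
 * example: an 8K TSB holds 8192 / 16 == 512 entries, so with 8K base
 * pages a virtual address of 0x100000 hashes to entry
 * (0x100000 >> 13) & 511 == 128.  The tag stored in an entry is the
 * virtual address shifted down by 22 bits, which is what
 * tag_compare() checks against.
 */
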
/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}
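
/* Note that flush_tsb_kernel_range() only overwrites the tag; the TSB
 * is purely a cache of translations, so an invalidated entry is simply
 * refilled from the kernel page tables on the next TSB miss for that
 * address.
 */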

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;
	struct tsb *tsb;
	int i;

	spin_lock_irqsave(&mm->context.lock, flags);

	tsb = mm->context.tsb;
	nentries = mm->context.tsb_nentries;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
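
/* Note on 'base' above: on cheetah_plus and hypervisor the TSB is
 * programmed by physical address (see the "Physical mapping" branch in
 * setup_tsb_params() below), so tsb_flush() is handed a physical entry
 * address there and a kernel virtual one on older chips.
 */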

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
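	/* Summarizing the cases below: an 8K TSB fits in an 8K page;
	 * 16K, 32K and 64K TSBs use a 64K page; 128K, 256K and 512K
	 * TSBs use a 512K page; a 1MB TSB needs a 4MB page.
	 */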
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	};
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		};
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		};
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
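
/* Illustrative example of the above: on cheetah_plus or hypervisor with
 * a 64K TSB whose (64K-aligned) physical address is P, the switch picks
 * tsb_reg = 0x3UL and the physical-mapping branch leaves
 * tsb_reg_val == (P | 0x3UL) with tsb_map_vaddr and tsb_map_pte both 0,
 * since no locked TLB entry is needed.
 */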

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * do_sparc64_fault() invokes this routine to try and grow the TSB.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
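/* Worked example of the 3/4 rule (illustrative): a 64K TSB holds
 * 64K / 16 == 4096 entries, so tsb_rss_limit becomes 3072.  Once the
 * address space maps that many pages, the next grow attempt picks the
 * next power of two, 128K, and so on up to the 1MB (or MAX_ORDER)
 * ceiling.
 */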
void tsb_grow(struct mm_struct *mm, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size, flags;
	struct page *page;
	struct tsb *old_tsb, *new_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(GFP_KERNEL, get_order(size));
	if (unlikely(!page))
		return;

	/* Mark all tags as invalid. */
	new_tsb = page_address(page);
	memset(new_tsb, 0x40, size);
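	/* 0x40 in every byte sets bit 46 of each 8-byte tag word, which
	 * is TSB_TAG_INVALID_BIT (assuming the usual sparc64 tsb.h
	 * value), so the miss handlers see every new entry as empty.
	 */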

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		free_pages((unsigned long) new_tsb, get_order(size));
		return;
	}

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
	}

	mm->context.tsb = new_tsb;
	setup_tsb_params(mm, size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb. */
		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}
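
/* Sketch of the caller side (not part of this file): per the comment
 * above tsb_grow(), the fault path does roughly
 *
 *	rss = get_mm_rss(mm);
 *	if (unlikely(rss >= mm->context.tsb_rss_limit))
 *		tsb_grow(mm, rss);
 *
 * which is why hitting the maximum size sets tsb_rss_limit to ~0UL:
 * that comparison can then never fire again.
 */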

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, get_mm_rss(mm));

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
	unsigned long flags;

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}