/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/fault.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003  Paul Mundt
 *
 */

#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/registers.h>		/* required by inline asm statements */

#if defined(CONFIG_SH64_PROC_TLB)
#include <linux/init.h>
#include <linux/proc_fs.h>
/* Count the number of calls made to each TLB-maintenance routine */
static unsigned long long calls_to_update_mmu_cache = 0ULL;
static unsigned long long calls_to_flush_tlb_page = 0ULL;
static unsigned long long calls_to_flush_tlb_range = 0ULL;
static unsigned long long calls_to_flush_tlb_mm = 0ULL;
static unsigned long long calls_to_flush_tlb_all = 0ULL;
unsigned long long calls_to_do_slow_page_fault = 0ULL;
unsigned long long calls_to_do_fast_page_fault = 0ULL;

/* Count the distribution of range sizes passed to flush_tlb_range */
static unsigned long long flush_tlb_range_1 = 0ULL;
static unsigned long long flush_tlb_range_2 = 0ULL;
static unsigned long long flush_tlb_range_3_4 = 0ULL;
static unsigned long long flush_tlb_range_5_7 = 0ULL;
static unsigned long long flush_tlb_range_8_11 = 0ULL;
static unsigned long long flush_tlb_range_12_15 = 0ULL;
static unsigned long long flush_tlb_range_16_up = 0ULL;

static unsigned long long page_not_present = 0ULL;

#endif

extern void die(const char *, struct pt_regs *, long);

#define PFLAG(val, flag)	(((val) & (flag)) ? #flag : "")
#define PPROT(flag)		PFLAG(pgprot_val(prot), flag)

static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", tsk->pid);
}

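/*
 * Walk the page tables and return a pointer to the PTE mapping 'address'
 * in 'mm' if a valid, present translation exists, or NULL otherwise.
 * This is the 2.6-era three-level walk (pgd/pmd/pte, no pud level); the
 * caller must make the walk safe (mmap_sem held, as in the fault path
 * below).
 */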
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir)) {
		return NULL;
	}

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd)) {
		return NULL;
	}

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry)) {
		return NULL;
	}
	if (!pte_present(entry)) {
		return NULL;
	}

	return pte;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
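/*
 * On the arguments (inferred from how they are used below): 'textaccess'
 * is non-zero when the fault was raised by an instruction fetch (ITLB
 * miss) and 'writeaccess' when it was raised by a store; 'address' is
 * the faulting effective address.
 */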
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_do_slow_page_fault;
#endif

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled.  This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);

	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);

		print_vma(vma);
#endif
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
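	/*
	 * handle_mm_fault() in this kernel generation returns VM_FAULT_MINOR
	 * (1), VM_FAULT_MAJOR (2), VM_FAULT_SIGBUS (0), or VM_FAULT_OOM;
	 * the cases below decode those values directly.
	 */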
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}
	/*
	 * If we get here, the page fault has been handled.  Do the TLB
	 * refill now from the newly-setup PTE, to avoid having to fault
	 * again right away on the same instruction.
	 */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/*
		 * From empirical evidence, we can get here, due to
		 * !pte_present(pte).  (e.g. if a swap-in occurs, and the
		 * page is swapped back out again before the process that
		 * wanted it gets rescheduled?)
		 */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;
		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, current->pid, current->comm,
			       (unsigned long) regs->pc);
#if 0
			show_regs(regs);
#endif
		}
		if (tsk->pid == 1) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		/* Never kill init: back off and retry the fault. */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

void flush_tlb_all(void);

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_update_mmu_cache;
#endif

	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}

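/*
 * Hunt for a single page's entry in the ITLB/DTLB.  Each slot's PTEH word
 * (read with getcfg from offset 0 of the slot's configuration register) is
 * compared against the expected {EPN, ASID, valid} value built in 'match';
 * a hit is invalidated via __flush_tlb_slot().
 */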
static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

	if (mm->context == NO_CONTEXT)
		return;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	/* Do ITLB : don't bother for pages in non-executable VMAs */
	if (vma->vm_flags & VM_EXEC) {
		for_each_itlb_entry(tlb) {
			asm volatile ("getcfg %1, 0, %0"
				      : "=r" (pteh)
				      : "r" (tlb) );

			if (pteh == match) {
				__flush_tlb_slot(tlb);
				break;
			}
		}
	}

	/* Do DTLB : any page could potentially be in here. */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg %1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_page;
#endif

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		__flush_tlb_page(vma, page);
		local_irq_restore(flags);
	}
}

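/*
 * Flush every TLB entry whose ASID matches this mm and whose EPN lies in
 * [start, end].  The PTEH word read back from each slot is split into its
 * page-aligned EPN and its low bits (ASID + valid bit), which are compared
 * separately below.
 */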
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_range;

	{
		unsigned long size = (end - 1) - start;
		size >>= PAGE_SHIFT;	/* divide by PAGE_SIZE */
		size++;			/* end = start + PAGE_SIZE => 1 page */
		switch (size) {
		case 1        : flush_tlb_range_1++;     break;
		case 2        : flush_tlb_range_2++;     break;
		case 3 ... 4  : flush_tlb_range_3_4++;   break;
		case 5 ... 7  : flush_tlb_range_5_7++;   break;
		case 8 ... 11 : flush_tlb_range_8_11++;  break;
		case 12 ... 15: flush_tlb_range_12_15++; break;
		default       : flush_tlb_range_16_up++; break;
		}
	}
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg %1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg %1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

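/*
 * flush_tlb_mm invalidates lazily: dropping the mm's context forces a new
 * ASID to be allocated on the next activation, so any stale TLB entries
 * still tagged with the old ASID can no longer match.
 */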
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_mm;
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	mm->context = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm);

	local_irq_restore(flags);
}

void flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */

	unsigned long flags, tlb;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_all;
#endif

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}

#if defined(CONFIG_SH64_PROC_TLB)
/* Procfs interface to read the performance information */

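/*
 * This uses the old-style read_proc interface: the handler formats its
 * whole report into the page-sized buffer 'buf', sets *eof, and returns
 * the length written, so the report must fit in a single page.
 */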
static int
tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
	int len = 0;
	len += sprintf(buf+len, "do_fast_page_fault called %12llu times\n", calls_to_do_fast_page_fault);
	len += sprintf(buf+len, "do_slow_page_fault called %12llu times\n", calls_to_do_slow_page_fault);
	len += sprintf(buf+len, "update_mmu_cache   called %12llu times\n", calls_to_update_mmu_cache);
	len += sprintf(buf+len, "flush_tlb_page     called %12llu times\n", calls_to_flush_tlb_page);
	len += sprintf(buf+len, "flush_tlb_range    called %12llu times\n", calls_to_flush_tlb_range);
	len += sprintf(buf+len, "flush_tlb_mm       called %12llu times\n", calls_to_flush_tlb_mm);
	len += sprintf(buf+len, "flush_tlb_all      called %12llu times\n", calls_to_flush_tlb_all);
	len += sprintf(buf+len, "flush_tlb_range_sizes\n"
		       " 1      : %12llu\n"
		       " 2      : %12llu\n"
		       " 3 - 4  : %12llu\n"
		       " 5 - 7  : %12llu\n"
		       " 8 - 11 : %12llu\n"
		       "12 - 15 : %12llu\n"
		       "16+     : %12llu\n",
		       flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4,
		       flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15,
		       flush_tlb_range_16_up);
	len += sprintf(buf+len, "page not present   %12llu times\n", page_not_present);
	*eof = 1;
	return len;
}

static int __init register_proc_tlb(void)
{
	create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL);
	return 0;
}

__initcall(register_proc_tlb);

#endif