/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/aee.h>
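
/*
 * Note: <linux/aee.h> above is not a mainline header; it is MediaTek's
 * Android Exception Engine interface, used below to snapshot register
 * state when kernel-mode aborts nest.
 */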

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}
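
/*
 * Common path for a fault with no usable mapping: a user-mode fault
 * raises SIGSEGV/SEGV_MAPERR against the current task, while a
 * kernel-mode fault falls through to the fixup and oops machinery in
 * __do_kernel_fault().
 */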
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}
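
/*
 * Synthetic fault codes returned by __do_page_fault().  They sit above
 * the generic VM_FAULT_* bits so they can travel in the same int as a
 * handle_mm_fault() return value without colliding with it.
 */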
#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}
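
/*
 * Look up the VMA covering 'addr' and pass the fault to
 * handle_mm_fault().  Returns a VM_FAULT_* code, possibly augmented
 * with VM_FAULT_BADMAP (no mapping at this address) or
 * VM_FAULT_BADACCESS (mapping present, but its permissions forbid the
 * access).  Caller must hold mmap_sem.
 */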
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
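
/*
 * Top-level data fault handler: performs the kprobes notification, the
 * atomic-context and mmap_sem deadlock checks, the retry loop and the
 * perf accounting, then translates the VM_FAULT_* result into a signal
 * for user mode or a kernel fixup/oops.
 */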
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	int write = fsr & FSR_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(write ? FAULT_FLAG_WRITE : 0);

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt, or have no irqs, or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || irqs_disabled() || !mm)
		goto no_context;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check
	 * pmd_none() on the entry that really corresponds to the address,
	 * not just on the first of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}
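
/*
 * One entry per possible FSR status value: the handler to try first,
 * plus the signal number and si_code reported if no handler resolves
 * the fault.
 */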
struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}
EXPORT_SYMBOL(hook_fault_code);
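
/*
 * Example (hypothetical): a platform could replace the default handling
 * of an imprecise external abort with
 *
 *	hook_fault_code(16 + 6, my_async_abort_handler, SIGBUS, BUS_OBJERR,
 *			"imprecise external abort");
 *
 * where 16 + 6 is the short-descriptor FSR status for an asynchronous
 * external abort and my_async_abort_handler is a made-up handler that
 * returns 0 once it has resolved the abort.
 */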

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	int ret;
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	if (!user_mode(regs)) {
		thread->cpu_excp++;
		if (thread->cpu_excp == 1) {
			thread->regs_on_excp = (void *)regs;
			aee_excp_regs = (void *)regs;
		}
		/*
		 * NoteXXX: The data abort exception may happen twice
		 *          when calling probe_kernel_address(), in which
		 *          __copy_from_user_inatomic() is used and the
		 *          fixup table lookup may be performed.
		 *          Check if a nested panic happens via
		 *          (cpu_excp >= 3).
		 */
		if (thread->cpu_excp >= 3) {
			aee_stop_nested_panic(regs);
		}
	}

	ret = inf->fn(addr, fsr & ~FSR_LNX_PF, regs);

	if (!user_mode(regs)) {
		thread->cpu_excp--;
	}
	if (!ret)
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}
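
/*
 * Counterpart of hook_fault_code() for the instruction-side (IFSR)
 * table consulted by do_PrefetchAbort().
 */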
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}
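
/*
 * Dispatch a prefetch abort to the relevant handler.  The IFSR shares
 * the fsr_fs() status layout with the data-side FSR; FSR_LNX_PF is a
 * Linux-private bit ORed in so shared handlers can tell an instruction
 * fetch fault from a data fault.
 */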
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	int ret;
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

	if (!user_mode(regs)) {
		thread->cpu_excp++;
		if (thread->cpu_excp == 1) {
			thread->regs_on_excp = (void *)regs;
		}
		/*
		 * NoteXXX: The prefetch abort exception may happen twice
		 *          when calling probe_kernel_address(), in which
		 *          __copy_from_user_inatomic() is used and the
		 *          fixup table lookup may be performed.
		 *          Check if a nested panic happens via
		 *          (cpu_excp >= 3).
		 */
		if (thread->cpu_excp >= 3) {
			aee_stop_nested_panic(regs);
		}
	}

	ret = inf->fn(addr, ifsr | FSR_LNX_PF, regs);

	if (!user_mode(regs)) {
		thread->cpu_excp--;
	}
	if (!ret)
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}
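
/*
 * These fault codes exist only in the short-descriptor (non-LPAE) FSR
 * encoding, and their meaning depends on the CPU architecture version,
 * so they are hooked at runtime rather than listed in the static table.
 */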
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed.
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif