[ARM] 3104/1: ARM EABI: new helper function names
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/init.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/traps.h>

#include "ptrace.h"
#include "signal.h"

const char *processor_modes[] = {
        "USER_26", "FIQ_26",  "IRQ_26",  "SVC_26",  "UK4_26",  "UK5_26",  "UK6_26",  "UK7_26",
        "UK8_26",  "UK9_26",  "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
        "USER_32", "FIQ_32",  "IRQ_32",  "SVC_32",  "UK4_32",  "UK5_32",  "UK6_32",  "ABT_32",
        "UK8_32",  "UK9_32",  "UK10_32", "UND_32",  "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *handler[] = {
        "prefetch abort", "data abort", "address exception", "interrupt"
};

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
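
/*
 * Usage note (an illustration, not new behaviour): booting with, say,
 * "user_debug=3" on the kernel command line sets UDBG_UNDEFINED |
 * UDBG_SYSCALL, so undefined instructions and obsolete/unknown syscalls
 * from user space are reported; the individual UDBG_* bits are defined
 * in <asm/system.h>.
 */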

void dump_backtrace_entry(unsigned long where, unsigned long from)
{
#ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] ", where);
        print_symbol("(%s) ", where);
        printk("from [<%08lx>] ", from);
        print_symbol("(%s)\n", from);
#else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
}
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0))
                return -EFAULT;

        return 0;
}

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
        unsigned long p = bottom & ~31;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

        for (p = bottom & ~31; p < top;) {
                printk("%04lx: ", p & 0xffff);

                for (i = 0; i < 8; i++, p += 4) {
                        unsigned int val;

                        if (p < bottom || p >= top)
                                printk("         ");
                        else {
                                __get_user(val, (unsigned long *)p);
                                printk("%08x ", val);
                        }
                }
                printk("\n");
        }

        set_fs(fs);
}

static void dump_instr(struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("Code: ");
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = __get_user(val, &((u16 *)addr)[i]);
                else
                        bad = __get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
                else {
                        printk("bad PC value.");
                        break;
                }
        }
        printk("\n");

        set_fs(fs);
}

static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unsigned int fp;
        int ok = 1;

        printk("Backtrace: ");
        fp = regs->ARM_fp;
        if (!fp) {
                printk("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                printk("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                printk("frame pointer underflow");
        printk("\n");

        if (ok)
                c_backtrace(fp, processor_mode(regs));
}

void dump_stack(void)
{
#ifdef CONFIG_DEBUG_ERRORS
        __backtrace();
#endif
}

EXPORT_SYMBOL(dump_stack);

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        unsigned long fp;

        if (!tsk)
                tsk = current;

        if (tsk != current)
                fp = thread_saved_fp(tsk);
        else
                asm("mov%? %0, fp" : "=r" (fp));

        c_backtrace(fp, 0x10);
        barrier();
}

static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
        struct task_struct *tsk = thread->task;
        static int die_counter;

        printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
        print_modules();
        __show_regs(regs);
        printk("Process %s (pid: %d, stack limit = 0x%p)\n",
                tsk->comm, tsk->pid, thread + 1);

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(regs);
        }
}

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
        struct thread_info *thread = current_thread_info();

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        __die(str, err, thread, regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
                unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_info(info->si_signo, info, current);
        } else {
                die(str, regs, err);
        }
}

static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        spin_unlock_irqrestore(&undef_lock, flags);
}
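
/*
 * Rough usage sketch (for illustration only): a subsystem that wants to
 * emulate or trap a particular undefined instruction fills in a struct
 * undef_hook with an instruction value/mask and a CPSR value/mask, plus
 * a handler that returns 0 once it has dealt with the instruction, e.g.
 *
 *      static struct undef_hook my_hook = {
 *              .instr_mask = 0x0fffffff,       // hypothetical pattern
 *              .instr_val  = 0x0e000000,
 *              .cpsr_mask  = PSR_T_BIT,        // ARM state only
 *              .cpsr_val   = 0,
 *              .fn         = my_handler,
 *      };
 *      register_undef_hook(&my_hook);
 *
 * The arm_mrc_hook used for TLS register emulation further down in this
 * file is an in-tree example of the same pattern.
 */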

asmlinkage void do_undefinstr(struct pt_regs *regs)
{
        unsigned int correction = thumb_mode(regs) ? 2 : 4;
        unsigned int instr;
        struct undef_hook *hook;
        siginfo_t info;
        void __user *pc;

        /*
         * According to the ARM ARM, the PC is 2 or 4 bytes ahead,
         * depending on whether we're in Thumb mode or not.
         * Correct this offset.
         */
        regs->ARM_pc -= correction;

        pc = (void __user *)instruction_pointer(regs);
        if (thumb_mode(regs)) {
                get_user(instr, (u16 __user *)pc);
        } else {
                get_user(instr, (u32 __user *)pc);
        }

        spin_lock_irq(&undef_lock);
        list_for_each_entry(hook, &undef_hook, node) {
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
                        if (hook->fn(regs, instr) == 0) {
                                spin_unlock_irq(&undef_lock);
                                return;
                        }
                }
        }
        spin_unlock_irq(&undef_lock);

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
                        current->comm, current->pid, pc);
                dump_instr(regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;

        notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

asmlinkage void do_unexp_fiq(struct pt_regs *regs)
{
#ifndef CONFIG_IGNORE_FIQ
        printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
        printk("You may have a hardware problem...\n");
#endif
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
{
        console_verbose();

        printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
                handler[reason], processor_modes[proc_mode]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
        struct thread_info *thread = current_thread_info();
        siginfo_t info;

        if (current->personality != PER_LINUX &&
            current->personality != PER_LINUX_32BIT &&
            thread->exec_domain->handler) {
                thread->exec_domain->handler(n, regs);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
                        current->pid, current->comm, n);
                dump_instr(regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        notify_die("Oops - bad syscall", regs, &info, n, 0);

        return regs->ARM_r0;
}

static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        struct vm_area_struct *vma;

        if (end < start || flags)
                return;

        vma = find_vma(current->active_mm, start);
        if (vma && vma->vm_start < end) {
                if (start < vma->vm_start)
                        start = vma->vm_start;
                if (end > vma->vm_end)
                        end = vma->vm_end;

                flush_cache_user_range(vma, start, end);
        }
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        struct thread_info *thread = current_thread_info();
        siginfo_t info;

        if ((no >> 16) != 0x9f)
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code  = SEGV_MAPERR;
                info.si_addr  = NULL;

                notify_die("branch through zero", regs, &info, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(current, regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
        case NR(cacheflush):
                do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
                return 0;
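
        /*
         * Informal example of the user-space side (a sketch, not a
         * definition of the ABI): after generating code into a buffer,
         * a JIT would typically ask the kernel to make it visible to the
         * instruction stream with something like
         *
         *      syscall(__ARM_NR_cacheflush, buf, buf + len, 0);
         *
         * where __ARM_NR_cacheflush comes from <asm/unistd.h> and the
         * third argument (flags) must be zero, as noted above.
         */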

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                thread->tp_value = regs->ARM_r0;
#if defined(CONFIG_HAS_TLS_REG)
                asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0));
#elif !defined(CONFIG_TLS_REG_EMUL)
                /*
                 * User space must never try to access this directly.
                 * Expect your app to break eventually if you do so.
                 * The user helper at 0xffff0fe0 must be used instead.
                 * (see entry-armv.S for details)
                 */
                *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
#endif
                return 0;

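        /*
         * Sketch of the matching user-space side (the authoritative
         * description of the kuser helpers lives in entry-armv.S): the
         * value stored by set_tls above is read back through the helper
         * at 0xffff0fe0 rather than by touching 0xffff0ff0 directly,
         * for example
         *
         *      typedef unsigned int (*kuser_get_tls_t)(void);
         *      #define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
         *
         *      unsigned int tp = __kuser_get_tls();
         */
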
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
        /*
         * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
         * Return zero in r0 if *MEM was changed or non-zero if no exchange
         * happened.  Also set the user C flag accordingly.
         * If access permissions have to be fixed up then non-zero is
         * returned and the operation has to be re-attempted.
         *
         * *NOTE*: This is a ghost syscall private to the kernel.  Only the
         * __kuser_cmpxchg code in entry-armv.S should be aware of its
         * existence.  Don't ever use this from user code.
         */
        case 0xfff0:
        {
                extern void do_DataAbort(unsigned long addr, unsigned int fsr,
                                         struct pt_regs *regs);
                unsigned long val;
                unsigned long addr = regs->ARM_r2;
                struct mm_struct *mm = current->mm;
                pgd_t *pgd; pmd_t *pmd; pte_t *pte;
                spinlock_t *ptl;

                regs->ARM_cpsr &= ~PSR_C_BIT;
                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, addr);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                if (!pte_present(*pte) || !pte_write(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }
                val = *(unsigned long *)addr;
                val -= regs->ARM_r0;
                if (val == 0) {
                        *(unsigned long *)addr = regs->ARM_r1;
                        regs->ARM_cpsr |= PSR_C_BIT;
                }
                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return val;

        bad_access:
                up_read(&mm->mmap_sem);
                /* simulate a write access fault */
                do_DataAbort(addr, 15 + (1 << 11), regs);
                return -1;
        }
#endif
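
        /*
         * For reference (a sketch of the public interface, which is
         * documented in entry-armv.S, not here): user code never issues
         * the 0xfff0 ghost syscall itself.  It calls the kuser helper at
         * 0xffff0fc0 instead, roughly
         *
         *      typedef int (*kuser_cmpxchg_t)(int oldval, int newval,
         *                                     volatile int *ptr);
         *      #define __kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)
         *
         *      if (__kuser_cmpxchg(old, new, &word) == 0)
         *              ;       // the store happened atomically
         *
         * and only that helper may fall back to this ghost syscall.
         */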

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if (no <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                printk("[%d] %s: arm syscall %d\n",
                        current->pid, current->comm, no);
                dump_instr(regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(regs->ARM_fp, processor_mode(regs));
                }
        }
#endif
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value;
        regs->ARM_pc += 4;
        return 0;
}

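/*
 * The hook below matches the ARM encoding of "mrc p15, 0, Rd, c13, c0, 3"
 * (a read of the user read-only thread ID register) with any condition
 * code and any destination register, in ARM state only (PSR_T_BIT clear).
 * Such reads are fixed up by returning the saved tp_value and stepping
 * the PC past the trapped instruction.
 */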
static struct undef_hook arm_mrc_hook = {
        .instr_mask     = 0x0fff0fff,
        .instr_val      = 0x0e1d0f70,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = 0,
        .fn             = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

void __bad_xchg(volatile void *ptr, int size)
{
        printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
                __builtin_return_address(0), ptr, size);
        BUG();
}
EXPORT_SYMBOL(__bad_xchg);

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        siginfo_t info;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                        current->pid, current->comm, code, instr);
                dump_instr(regs);
                show_pte(current->mm, addr);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = (void __user *)addr;

        notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __attribute__((noreturn)) __bug(const char *file, int line, void *data)
{
        printk(KERN_CRIT "kernel BUG at %s:%d!", file, line);
        if (data)
                printk(" - extra data = %p", data);
        printk("\n");
        *(int *)0 = 0;

        /* Avoid "noreturn function does return" */
        for (;;);
}
EXPORT_SYMBOL(__bug);

void __readwrite_bug(const char *fn)
{
        printk("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, unsigned long val)
{
        printk("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        printk("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        printk("%s:%d: bad pgd %08lx.\n", file, line, val);
}

asmlinkage void __div0(void)
{
        printk("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

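/*
 * Rough layout of the vector page that trap_init() assembles at
 * 0xffff0000 (the addresses follow directly from the copies below):
 *
 *      0xffff0000              exception vectors       (__vectors_start)
 *      0xffff0200              vector stubs            (__stubs_start)
 *      KERN_SIGRETURN_CODE     signal return trampolines
 *      0xffff1000 - kuser_sz   kuser helpers, packed up against 0xffff1000
 */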
void __init trap_init(void)
{
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
        memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
        memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * Copy signal return handlers into the vector page, and
         * set sigreturn to be a pointer to these.
         */
        memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
               sizeof(sigreturn_codes));

        flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}