/*
 * arch/x86/kernel/traps.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

#include "cpu/mcheck/mce.h"

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
#endif
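
/*
 * Background note (not from this file): the Pentium F0 0F erratum is
 * worked around by write-protecting the page-aligned IDT; the faulting
 * access is then recognized in the page-fault path and converted into
 * the proper trap, which is why idt_table above must be page-aligned.
 */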

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}
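
/*
 * A note on the preempt_conditional_sti()/cli() pair above: the handlers
 * that run on an IST stack (do_int3(), do_debug(), do_stack_segment())
 * bump the preempt count before re-enabling interrupts so the task
 * cannot be scheduled away while it is still running on the per-CPU IST
 * stack; the count is dropped again only after interrupts are off.
 */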

#ifdef CONFIG_X86_32
static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}
#endif

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not for
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't pollute the
	 * information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}

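/*
 * For illustration: an invocation such as
 * DO_ERROR(4, SIGSEGV, "overflow", overflow) below expands roughly to
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *			       4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */
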
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG
		"You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
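
/*
 * A sketch of the NMI status/control port (0x61) used above and below:
 * on read, bit 7 reports a memory parity error and bit 6 an I/O channel
 * check (hence the 0x80, 0x40 and 0xc0 tests in default_do_nmi());
 * on write, setting bit 2 clears and disables parity-error reporting,
 * and setting bit 3 does the same for IOCHK, which is why the handlers
 * write back (reason & 0xf) | 4 or | 8.
 */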

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);

	i = 2000;
	while (--i)
		udelay(1000);

	reason &= ~8;
	outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
			NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);
#else
		unknown_nmi_error(reason, regs);
#endif

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
#ifdef CONFIG_X86_32
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered:
	 */
	reassert_nmi();
#endif
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Already synced */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
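
/*
 * A note on the cases above: if the saved sp already points at the
 * register frame itself, no stack switch happened and nothing needs to
 * be copied; an exception from user space is redirected to the task's
 * normal kernel stack via task_pt_regs(); and a kernel exception with
 * interrupts enabled gets the frame copied below the interrupted kernel
 * stack pointer so the handler can safely run there.
 */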
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	int si_code;

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irqs after DR6 has been saved */
	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto debug_vm86;
#endif

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code(condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

#ifdef CONFIG_X86_32
debug_vm86:
	/* reenable preemption: handle_vm86_trap() might sleep */
	dec_preempt_count();
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	conditional_cli(regs);
	return;
#endif

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}
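
/*
 * For reference, the DR6 bits tested above (see <asm/debugreg.h>):
 * DR_TRAP0..DR_TRAP3 are bits 0-3 and report which hardware breakpoint
 * fired, DR_STEP is bit 14 and reports a TF single-step trap; DR7 holds
 * the breakpoint enable bits, so a zero thread.debugreg7 marks a
 * breakpoint trap as spurious.
 */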

#ifdef CONFIG_X86_64
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 signalling.
 */
void math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd, err;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 bit needed in case of a stack fault, and 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
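	/*
	 * For reference, the x87 status-word exception bits tested below:
	 *   0x001 IE  invalid operation     0x008 OE  overflow
	 *   0x002 DE  denormalized operand  0x010 UE  underflow
	 *   0x004 ZE  zero divide           0x020 PE  precision
	 *   0x040 SF  stack fault           0x200 C1  (fault direction)
	 */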
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);

	err = swd & ~cwd;

	if (err & 0x001) {	/* Invalid op */
		/*
		 * (swd & 0x240) == 0x040: Stack Underflow
		 * (swd & 0x240) == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap 16
		 * implementations, it's possible we get a spurious trap...
		 */
		return;	/* Spurious trap, no error */
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#else
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;
#endif

	math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
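	/*
	 * Worked example: with only the zero-divide exception unmasked
	 * and raised, MXCSR reads 0x1d84; then
	 * (0x1d84 & 0x1f80) >> 7 = 0x3b, and ~0x3b & (0x1d84 & 0x3f) = 0x04,
	 * which selects the FPE_FLTDIV case below.
	 */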
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->ip);
		return;
	}
	/*
	 * In all other cases, handle the strange "cache flush from
	 * user space" exception. This is undocumented behaviour.
	 */
	if (regs->flags & X86_VM_MASK) {
		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
		return;
	}
	current->thread.trap_no = 19;
	current->thread.error_code = error_code;
	die_if_kernel("cache flush denied", regs, error_code);
	force_sig(SIGSEGV, current);
#else
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;
	simd_math_error((void __user *)regs->ip);
#endif
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

#ifdef CONFIG_X86_32
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

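	/*
	 * A rough map of the descriptor packing below (standard GDT
	 * layout): limit bits 15..0 sit in descriptor bits 15..0 and
	 * limit bits 19..16 in bits 51..48; base bits 23..0 sit in
	 * descriptor bits 39..16 and base bits 31..24 in bits 63..56.
	 * The 0x00f0ff0000000000ULL mask preserves only the access byte
	 * and flags nibble while the old base and limit are cleared.
	 */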
	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

	return new_kesp;
}
#else
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
#endif

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
#ifdef CONFIG_X86_32
	restore_fpu(tsk);
#else
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}
#endif
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
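
/*
 * A sketch of the lazy-FPU protocol around math_state_restore(): the
 * context switch sets CR0.TS when it deactivates a task's FPU state;
 * the next FPU instruction then raises #NM (trap 7, "device not
 * available"), do_device_not_available() below calls
 * math_state_restore(), and the clts() above clears TS so FPU
 * instructions stop faulting until the next switch.
 */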

#ifndef CONFIG_MATH_EMULATION
void math_emulate(struct math_emu_info *info)
{
	printk(KERN_EMERG
		"math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}
#endif /* CONFIG_MATH_EMULATION */

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
	} else {
		math_state_restore(); /* interrupts still off */
		conditional_sti(regs);
	}
#else
	math_state_restore();
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception",
			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from user space (gate DPL is 3) */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from user space (gate DPL is 3) */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
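	/*
	 * Note: on 32-bit, double faults go through a task gate with its
	 * own TSS (GDT_ENTRY_DOUBLEFAULT_TSS) so the handler gets a
	 * known-good stack even when the kernel stack is corrupt; 64-bit
	 * achieves the same with the DOUBLEFAULT_STACK IST entry above.
	 */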
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

#ifdef CONFIG_X86_32
	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
#endif

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

	set_bit(IA32_SYSCALL_VECTOR, used_vectors);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

#ifdef CONFIG_X86_32
	x86_quirk_trap_init();
#endif
}