/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
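
/*
 * Usage sketch (illustrative, not from the original file): a ptrace-style
 * caller must flush the live FP state into the thread_struct before
 * reading a stopped child's registers, e.g.:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(data, child->thread.fpr, sizeof(child->thread.fpr));
 */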
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
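
/*
 * Usage sketch (illustrative, not from the original file): kernel code
 * that wants to use the FPU must hold off preemption for the duration,
 * since enable_kernel_fp() only sets things up on the current CPU:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... FP instructions ...
 *	preempt_enable();
 */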
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
#ifndef CONFIG_SMP
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif /* CONFIG_SPE */
	preempt_enable();
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct thread_struct *thread)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
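
/*
 * Worked example of the encoding above (illustrative, not from the
 * original file): for a read/write breakpoint on 0x1000, brk->address
 * is 0x1000 and the low bits of brk->type carry the DABR read/write
 * enables, so "dabr" becomes 0x1000 plus those enable bits, while
 * "dabrx" is taken from the privilege bits held at brk->type >> 3.
 */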
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
		<< (63 - 58);	/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
		<< (63 - 59);	/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
		>> 3;		/* PRIM bits */
	/* dawr length is stored in field MDR bits 48:53.  Matches range in
	 * doublewords (64 bits) biased by -1, eg. 0b000000=1DW and
	 * 0b111111=64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
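
/*
 * Worked example of the MDR length bias above (illustrative, not from
 * the original file): an 8-byte watchpoint gives
 * mrd = ((8 + 7) >> 3) - 1 = 0, i.e. a one-doubleword match range,
 * while a 16-byte watchpoint gives mrd = ((16 + 7) >> 3) - 1 = 1,
 * i.e. two doublewords.
 */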
int set_breakpoint(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}
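
/*
 * Usage sketch (illustrative, not from the original file): a caller
 * installs a data breakpoint by filling in an arch_hw_breakpoint and
 * handing it to set_breakpoint(), which picks DAWR or DABR for the
 * current CPU:
 *
 *	struct arch_hw_breakpoint brk = {
 *		.address = addr,
 *		.type    = HW_BRK_TYPE_WRITE | HW_BRK_TYPE_PRIV_ALL,
 *		.len     = 8,
 *	};
 *	set_breakpoint(&brk);
 */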
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.
	 */
	thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs need to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
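
/*
 * Context-switch flow for the TM helpers above (illustrative summary,
 * not from the original file): __switch_to() calls __switch_to_tm(prev)
 * to tm_reclaim() the outgoing transactional task (stashing checkpointed
 * state in prev->thread.ckpt_regs), then calls
 * tm_recheckpoint_new_task(new) just before _switch() so that the
 * incoming task's checkpointed and speculative register states are both
 * live again when it resumes.
 */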
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	/* Back up the TAR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR here before we do treclaim/trecheckpoint as these
	 * will change the TAR.
	 */
	save_tar(&prev->thread);

	__switch_to_tm(prev);
#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */
#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule the DABR update for us, so the manual set_breakpoint() below
 * is only needed when CONFIG_HAVE_HW_BREAKPOINT is not set.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
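
/*
 * Example output (illustrative, not from the original file): with
 * val = 0x9032, i.e. MSR_EE|MSR_ME|MSR_IR|MSR_DR|MSR_RI on a non-BookE
 * configuration, printbits(val, msr_bits) prints "<EE,ME,IR,DR,RI>".
 */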
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void
release_thread(struct task_struct *t)
{
}
/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	return 0;
}
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;
	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif
#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}
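
/*
 * Illustrative note (not from the original file): a ppc64 ELFv1 function
 * descriptor is three doublewords -- { entry, toc, env } -- so on PPC64
 * the dereference above reads the first doubleword, the real code
 * address, while "f" itself is only a pointer to the descriptor.  A
 * minimal sketch of the layout:
 *
 *	struct func_desc {
 *		unsigned long entry;	(code address, what kregs->nip needs)
 *		unsigned long toc;	(TOC/r2 value)
 *		unsigned long env;
 *	};
 */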
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif
	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
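
/*
 * Usage sketch (illustrative, not from the original file): these helpers
 * back the generic prctl() interface, so userspace selects the FP
 * exception mode with, e.g.:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 */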
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
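
/*
 * Usage sketch (illustrative, not from the original file): like the
 * fpexc helpers, these implement prctl(PR_SET_ENDIAN) and
 * prctl(PR_GET_ENDIAN), e.g. prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG)
 * from userspace.
 */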
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}

	return 0;
}
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
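
/*
 * Worked example (illustrative, not from the original file): with 4K
 * pages (PAGE_SHIFT = 12), a 32-bit task draws rnd from
 * [0, 2^(23-12)) = [0, 2048) pages, i.e. up to 8MB once shifted back
 * by PAGE_SHIFT; a 64-bit task gets [0, 2^(30-12)) pages, i.e. up to
 * 1GB of brk randomisation.
 */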
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}