powerpc: Keep thread.dscr and thread.dscr_inherit in sync
arch/powerpc/kernel/process.c
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
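
/*
 * Usage sketch (editor's note, not part of this file): callers such as
 * the ptrace and signal code use this before reading FP state out of
 * the thread_struct, so that anything still live in the registers is
 * written back first, e.g.
 *
 *	flush_fp_to_thread(child);
 *	memcpy(buf, child->thread.fpr, sizeof(child->thread.fpr));
 */
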
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

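/*
 * Usage sketch (editor's note): kernel code that wants to execute FP
 * instructions must hold off preemption around the FP section, e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... FP instructions ...
 *	preempt_enable();
 *
 * The WARN_ON(preemptible()) above enforces that convention.
 */
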
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

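/*
 * Note (editor's sketch): on UP kernels the last_task_used_* pointers
 * implement fully lazy switching - state stays live in the registers
 * until some other task traps and reloads it.  Paths that invalidate
 * that state (in this file: exit_thread(), flush_thread() and
 * start_thread()) must call discard_lazy_cpu_state() so a stale
 * pointer is never used.
 */
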
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

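/*
 * Usage sketch (editor's note, userspace side): a debugger that armed a
 * watchpoint sees the resulting SIGTRAP with si_code == TRAP_HWBKPT and
 * si_addr set to the matching data address, e.g. in a sigaction handler:
 *
 *	void handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == TRAP_HWBKPT)
 *			printf("watchpoint hit at %p\n", si->si_addr);
 *	}
 */
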
static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	if (thread->dabr) {
		thread->dabr = 0;
		set_dabr(0);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
	mtspr(SPRN_DABR, dabr);
#endif

	return 0;
}

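/*
 * Usage sketch (editor's note, details vary by kernel version): the
 * ptrace PTRACE_SET_DEBUGREG path arms a data watchpoint by storing the
 * address plus DABR_* flag bits into task->thread.dabr, roughly
 *
 *	task->thread.dabr = addr | DABR_DATA_READ | DABR_DATA_WRITE
 *			    | DABR_TRANSLATION;
 *
 * and the value then reaches the hardware through set_dabr(), either
 * directly or via the current_dabr comparison in __switch_to() below.
 */
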
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}

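/*
 * Note (editor's sketch): the "save only" half of the lazy scheme above
 * relies on the unavailable-facility exception path to do the restore:
 * when the new task next executes an FP instruction with MSR_FP clear,
 * it traps and load_up_fpu() (fpu.S, with AltiVec/SPE counterparts)
 * reloads its registers and sets MSR_FP again, so no restore is needed
 * on every context switch.
 */
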
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

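/*
 * Example (editor's note): for a typical 64-bit user-mode MSR value,
 * printbits(regs->msr, msr_bits) emits something like
 * <SF,EE,PR,FP,ME,IR,DR,RI> - only the names of the bits that are set,
 * comma-separated inside angle brackets.
 */
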
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm,
	       task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	*dst = *src;
	return 0;
}

/*
 * Copy a thread..
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		if (current->thread.dscr_inherit) {
			p->thread.dscr_inherit = 1;
			p->thread.dscr = current->thread.dscr;
		} else if (0 != dscr_default) {
			p->thread.dscr_inherit = 1;
			p->thread.dscr = dscr_default;
		} else {
			p->thread.dscr_inherit = 0;
			p->thread.dscr = 0;
		}
	}
#endif

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_fork) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

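/*
 * Note (editor's sketch): the DSCR block above is what the commit
 * "powerpc: Keep thread.dscr and thread.dscr_inherit in sync" is
 * concerned with.  A child inherits a non-default DSCR whenever the
 * parent set one explicitly (dscr_inherit) or the system-wide
 * dscr_default is non-zero; the matching code that keeps the two
 * fields consistent when a process changes its own DSCR lives in the
 * sysfs and mtspr-emulation paths, roughly:
 *
 *	current->thread.dscr = new_value;
 *	current->thread.dscr_inherit = 1;
 *	mtspr(SPRN_DSCR, new_value);
 */
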
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

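/*
 * Usage sketch (editor's note, userspace side): these hooks back the
 * generic prctl() interface, e.g.
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);	 - precise FP exceptions
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 */
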
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

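/*
 * Usage sketch (editor's note, userspace side):
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);	 - needs CPU_FTR_REAL_LE
 *	prctl(PR_GET_ENDIAN, (unsigned long)&val);
 */
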
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (is_32bit_task()) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((const char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename,
			  (const char __user *const __user *) a1,
			  (const char __user *const __user *) a2, regs);
	putname(filename);
out:
	return error;
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

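/*
 * Note (editor's sketch): get_wchan() walks the task's stack by
 * following the back-chain word at offset 0 of each frame, a layout
 * the PowerPC ABI guarantees:
 *
 *	sp -> [ back chain  ]  -> caller's sp
 *	      [ ...         ]
 *	      [ saved LR    ]  at sp[STACK_FRAME_LR_SAVE]
 *
 * validate_sp() bounds-checks each candidate frame so a corrupt chain
 * cannot walk off the task's stack pages.
 */
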
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}
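
/*
 * Worked example (editor's note): with the usual 4KB pages
 * (PAGE_SHIFT = 12), brk_rnd() draws rnd from [0, 2^(23-12)) on 32-bit
 * and [0, 2^(30-12)) on 64-bit, then shifts by PAGE_SHIFT, giving a
 * page-aligned offset below 8MB (2^23) or 1GB (2^30) respectively -
 * matching the "8MB for 32bit, 1GB for 64bit" comment above.
 */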