KVM: PPC: Resolve real-mode handlers through function exports
arch/powerpc/kernel/process.c
/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct. That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process. Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}

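/*
 * Example (a hypothetical caller, not code from this file): a
 * ptrace-style reader flushes the live FP state into the thread_struct
 * before inspecting it, e.g.:
 *
 *        flush_fp_to_thread(child);
 *        val = child->thread.fpr[regno][0];
 *
 * Here "child" and "regno" are illustrative names only.
 */
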
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

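/*
 * Example (illustrative sketch, not from this file): in-kernel FP use
 * must be non-preemptible, since enable_kernel_fp() warns when called
 * from preemptible context:
 *
 *        preempt_disable();
 *        enable_kernel_fp();
 *        ... floating-point instructions ...
 *        preempt_enable();
 */
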
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

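/*
 * Note: giveup_vsx() flushes the FP and VMX state first because the
 * VSX register file overlays the FP and VMX registers; __giveup_vsx()
 * then hands back only the VSX-specific state.
 */
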
void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
             unsigned long error_code)
{
        siginfo_t info;

        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_dabr_match(regs))
                return;

        /* Clear the DABR */
        set_dabr(0);

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is using the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        if (thread->dabr) {
                thread->dabr = 0;
                set_dabr(0);
        }
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr)
{
        __get_cpu_var(current_dabr) = dabr;

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr);

        /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
#endif

        return 0;
}

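/*
 * Example (a sketch, not code from this file): a caller arming a data
 * breakpoint would combine an 8-byte-aligned address with the DABR
 * control bits before calling set_dabr(). The flag names below are
 * assumed to come from this kernel's <asm/reg.h>; "watched" is a
 * hypothetical variable:
 *
 *        set_dabr(((unsigned long)&watched & ~7UL) |
 *                 DABR_TRANSLATION | DABR_DATA_READ | DABR_DATA_WRITE);
 */
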
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule the DABR update.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
                set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#if defined(CONFIG_PPC_BOOK3E_64)
        /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
         * we always hold the user values, so we set it now.
         *
         * However, we ensure the kernel MSR:DE is appropriately cleared too
         * to avoid spurious single step exceptions in the kernel.
         *
         * This will have to change to merge with the ppc32 code at some point,
         * but I don't like much what ppc32 is doing today so there's some
         * thinking needed there
         */
        if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
                u32 dbcr0;

                mtmsr(mfmsr() & ~MSR_DE);
                isync();
                dbcr0 = mfspr(SPRN_DBCR0);
                dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
                mtspr(SPRN_DBCR0, dbcr0);
        }
#endif /* CONFIG_PPC_BOOK3E_64 */

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                long unsigned start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        account_system_vtime(current);
        account_process_vtime(current);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync. Hard disable here.
         */
        hard_irq_disable();
        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                     __get_user(instr, (unsigned int __user *)pc)) {
                        printk("XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk("<%08x> ", instr);
                        else
                                printk("%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
        {MSR_ME,        "ME"},
        {MSR_CE,        "CE"},
        {MSR_DE,        "DE"},
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}

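/*
 * Example: for an MSR value with EE, PR, FP, IR and DR set,
 * printbits(regs->msr, msr_bits) prints "<EE,PR,FP,IR,DR>",
 * with no trailing separator.
 */
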
#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs * regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx %s (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_vsx_to_thread(current);
        flush_spe_to_thread(current);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(tsk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

/*
 * Copy a thread..
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long unused, struct task_struct *p,
                struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
                childregs->gpr[2] = (unsigned long) p;
#else
                clear_tsk_thread_flag(p, TIF_32BIT);
#endif
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                if (current->thread.dscr_inherit) {
                        p->thread.dscr_inherit = 1;
                        p->thread.dscr = current->thread.dscr;
                } else if (0 != dscr_default) {
                        p->thread.dscr_inherit = 1;
                        p->thread.dscr = dscr_default;
                } else {
                        p->thread.dscr_inherit = 0;
                        p->thread.dscr = 0;
                }
        }
#endif

        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The function (ret_from_fork) is actually a pointer
         * to the TOC entry.  The first entry is a pointer to the actual
         * function.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)ret_from_fork);
#else
        kregs->nip = (unsigned long)ret_from_fork;
#endif

        return 0;
}

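/*
 * The child stack built by copy_thread() above, growing down from the
 * top of the stack page, looks like this:
 *
 *        task_stack_page(p) + THREAD_SIZE
 *          user pt_regs (childregs, copied from the parent)
 *          STACK_FRAME_OVERHEAD
 *          kernel pt_regs (kregs, nip set to ret_from_fork)
 *          STACK_FRAME_OVERHEAD   <- p->thread.ksp
 */
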
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        set_fs(USER_DS);

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* on a CONFIG_SPE this does not hurt us.  The bits that
         * __pack_fe01 use do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

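/*
 * Example (illustrative): set_fpexc_mode() and get_fpexc_mode() back
 * the PR_SET_FPEXC and PR_GET_FPEXC prctls, so precise FP exception
 * mode would be requested from userspace with:
 *
 *        prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 */
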
int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))

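/*
 * TRUNC_PTR() masks a pointer down to its low 32 bits; sys_clone()
 * below applies it so that pointers passed in by 32-bit tasks on a
 * 64-bit kernel are not interpreted with stale upper bits.
 */
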
int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
#ifdef CONFIG_PPC64
        if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
#endif
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char *filename;

        filename = getname((const char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_spe_to_thread(current);
        error = do_execve(filename,
                          (const char __user *const __user *) a1,
                          (const char __user *const __user *) a2, regs);
        putname(filename);
out:
        return error;
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

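/*
 * The walk in get_wchan() relies on the PowerPC stack frame layout:
 * the first word of each frame is the back chain to the caller's
 * frame, and the saved LR sits at the fixed STACK_FRAME_LR_SAVE
 * offset. At most 16 frames are examined.
 */
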
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
        unsigned long ctrl;

        if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
                HMT_medium();

                ctrl = mfspr(SPRN_CTRLF);
                ctrl |= CTRL_RUNLATCH;
                mtspr(SPRN_CTRLT, ctrl);

                set_thread_flag(TIF_RUNLATCH);
        }
}

void __ppc64_runlatch_off(void)
{
        unsigned long ctrl;

        HMT_medium();

        clear_thread_flag(TIF_RUNLATCH);

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
        struct thread_info *ti;

        ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
        if (unlikely(ti == NULL))
                return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
        memset(ti, 0, THREAD_SIZE);
#endif
        return ti;
}

void free_thread_info(struct thread_info *ti)
{
        kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
        BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

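/*
 * Arithmetic note for brk_rnd(): the random draw is bounded by
 * 2^(23 - PAGE_SHIFT) or 2^(30 - PAGE_SHIFT) pages and then shifted
 * back up by PAGE_SHIFT, so the brk offset stays page aligned and
 * spans up to 8MB (2^23) for 32-bit tasks and 1GB (2^30) for 64-bit
 * tasks.
 */
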
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment. Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}