arch/powerpc/kernel/process.c

/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}

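/*
 * Usage note (added for context, not in the original source): callers
 * such as the ptrace and signal-handling paths are expected to call
 * flush_fp_to_thread() before reading a task's FP state out of the
 * thread_struct, so that the values there are current.
 */
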
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

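/*
 * Sketch of the expected calling pattern (illustrative only, not a real
 * in-tree user): enable_kernel_fp() warns if we are preemptible, so a
 * kernel-FP section must pin itself to the CPU first:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP instructions ...
 *	preempt_enable();
 */
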
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

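/*
 * Note added for clarity (not in the original source): the VSX register
 * file overlays the FPRs and the VMX registers, so giving up VSX state
 * only makes sense once the FP and Altivec state has been given up as
 * well - hence the three calls below.
 */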
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

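/*
 * Clarifying note (added): the !SMP-only block above is needed because on
 * SMP the FP/VMX/VSX/SPE state is saved eagerly in __switch_to(), so there
 * is never a lazy "last user" to disown.
 */
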
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DAC and struct entries.  One shot trigger */
#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
							| DBCR0_IDM));
#endif

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}

static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	mtspr(SPRN_DABR, dabr);
#endif

#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DAC1, dabr);
#endif

	return 0;
}

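/*
 * Illustrative note (added): the dabr value passed in encodes both the
 * watched address and control bits.  Assuming the usual reg.h macro
 * definitions, a caller such as
 *
 *	set_dabr(addr | DABR_TRANSLATION | DABR_DATA_WRITE);
 *
 * would ask for a trap on translated stores to addr.
 */
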
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);

#if defined(CONFIG_BOOKE)
	/* If new thread DAC (HW breakpoint) is the same then leave it */
	if (new->thread.dabr)
		set_dabr(new->thread.dabr);
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}

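/*
 * Sketch of the lazy (!SMP) flow, added for orientation: when a task
 * touches the FPU it takes an "FP unavailable" exception; the low-level
 * handler (load_up_fpu in the assembly code) loads its registers and
 * records it in last_task_used_math, so __switch_to() only needs to
 * re-enable MSR_FP when switching back to that same task.
 */
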
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_ME,	"ME"},
	{MSR_CE,	"CE"},
	{MSR_DE,	"DE"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

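/*
 * Example (added): for a typical user-mode MSR with external interrupts
 * and address translation enabled, printbits(regs->msr, msr_bits) would
 * emit something like "<EE,PR,FP,ME,IR,DR>".
 */
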
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}
#endif

	discard_lazy_cpu_state();

	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);

#if defined(CONFIG_BOOKE)
		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
	}
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread..
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_fork) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

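/*
 * Background note (added for clarity): under the 64-bit ELF ABI used
 * above, a function "pointer" such as ret_from_fork actually points at
 * a descriptor of the form { entry address, TOC pointer, environment },
 * which is why copy_thread() dereferences it to get the real entry
 * address on CONFIG_PPC64.
 */
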
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000;	/* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

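/*
 * For reference (standard PowerPC MSR semantics, added): __pack_fe01()
 * folds the prctl value into the MSR[FE0,FE1] pair, where 0b00 disables
 * FP exceptions, 0b01 and 0b10 select the imprecise modes, and 0b11 is
 * precise mode (PR_FP_EXC_PRECISE).
 */
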
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

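/*
 * Terminology note (added): CPU_FTR_REAL_LE means the core implements
 * true little-endian operation, while CPU_FTR_PPC_LE refers to the older
 * byte-swapped "PPC little-endian" mode; the prctl() values
 * PR_ENDIAN_LITTLE and PR_ENDIAN_PPC_LITTLE map to those two features
 * respectively.
 */
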
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	putname(filename);
out:
	return error;
}

#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

#else
#define valid_irq_stack(sp, p, nb)	0
#endif /* CONFIG_IRQSTACKS */

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
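
/*
 * Layout note (added): every PowerPC stack frame begins with a back-chain
 * word pointing at the caller's frame, and the caller's LR is saved at a
 * fixed offset (STACK_FRAME_LR_SAVE words in).  Both get_wchan() above and
 * show_stack() below walk frames by following that back chain.
 */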

static int kstack_depth_to_print = 64;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif
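
/*
 * Usage note (added): the "run latch" in the CTRL SPR tells the hardware
 * (and tools reading it) whether this hardware thread is doing useful
 * work; the idle loop is expected to call ppc64_runlatch_off() and kernel
 * entry paths ppc64_runlatch_on().  CTRL is read via SPRN_CTRLF and
 * written via SPRN_CTRLT, which is why both names appear above.
 */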
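/*
 * Note (added): when THREAD_SHIFT < PAGE_SHIFT a whole page per kernel
 * stack would waste memory, so thread_info/stack allocations come from
 * the dedicated slab cache below instead of the page allocator.
 */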
#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */