/*
 * arch/ppc/kernel/process.c
 *
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* this is 8kB-aligned so we can get to the thread_info struct
   at the base of it from the stack pointer with 1 integer instruction. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(current);
		}
		preempt_enable();
	}
}

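/*
 * Allow the kernel itself to use the FPU: give up any live user
 * FP state first, then leave MSR_FP enabled for kernel code.
 * Callers must not be preemptible (hence the WARN_ON below).
 */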
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

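/*
 * Copy a task's FP registers out to an elf_fpregset_t (used when
 * dumping register state, e.g. for a core file).  Returns 0 if the
 * task has no user register state, 1 after copying the registers.
 */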
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (!tsk->thread.regs)
		return 0;
	flush_fp_to_thread(current);

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));

	return 1;
}

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/AltiVec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(current);
		}
		preempt_enable();
	}
}

int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	flush_altivec_to_thread(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}
#endif /* CONFIG_ALTIVEC */

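/*
 * SPE (Signal Processing Engine) is the embedded floating-point and
 * vector unit on e500-style cores; the lazy save/flush logic below
 * parallels the FP and AltiVec code above.
 */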
#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(current);
		}
		preempt_enable();
	}
}

int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	flush_spe_to_thread(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}
#endif /* CONFIG_SPE */

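/*
 * The DABR (Data Address Breakpoint Register) provides hardware
 * watchpoints.  On PPC64 the register may be managed by firmware,
 * so set_dabr() tries the hypervisor interfaces first and falls
 * back to writing the SPR directly.
 */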
static void set_dabr_spr(unsigned long val)
{
	mtspr(SPRN_DABR, val);
}

int set_dabr(unsigned long dabr)
{
	int ret = 0;

#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_XDABR)) {
		/* We want to catch accesses from kernel and userspace */
		unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
		ret = plpar_set_xdabr(dabr, flags);
	} else if (firmware_has_feature(FW_FEATURE_DABR)) {
		ret = plpar_set_dabr(dabr);
	} else
#endif
		set_dabr_spr(dabr);

	return ret;
}

static DEFINE_PER_CPU(unsigned long, current_dabr);

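/*
 * The machine-dependent part of a context switch.  Any lazily-held
 * FP/vector state of the outgoing task is saved first (on SMP),
 * the DABR is updated if the incoming task's differs, and _switch()
 * then swaps kernel stacks.  Returns the task we switched away from.
 */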
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64	/* for now */
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
		set_dabr(new->thread.dabr);
		__get_cpu_var(current_dabr) = new->thread.dabr;
	}
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;
	local_irq_save(flags);
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}

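/*
 * Dump the register state in 'regs' to the console: NIP, MSR bits,
 * the GPRs, the symbolic NIP/LR if CONFIG_KALLSYMS is set, and
 * finally a stack backtrace.  Used by the exception and oops paths.
 */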
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
	       print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr & MSR_FP ? 1 : 0, regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' THREAD: %p\n",
	       current, current->pid, current->comm, current->thread_info);
	printk("Last syscall: %ld ", current->thread.last_syscall);

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		long r;
		if ((i % 8) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		if (__get_user(r, &regs->gpr[i]))
			break;
		printk("%08lX ", r);
		if (i == 12 && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Look up the NIP late so we have the best chance of getting
	 * the above info out without failing.
	 */
	printk("NIP [%08lx] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR [%08lx] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
}

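/*
 * exit_thread() (task is dying) and flush_thread() (task is about
 * to exec a new image) must both clear any last_task_used_* lazy
 * ownership, so that stale FP/vector state is never saved into a
 * task that has gone away or been replaced.
 */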
void exit_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
}

void flush_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64	/* for now */
	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);
	}
#endif
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread.
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
	unsigned long childframe;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[2] = (unsigned long) p;
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS)
			childregs->gpr[2] = childregs->gpr[6];
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;
	childframe = sp;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	kregs->nip = (unsigned long)ret_from_fork;

	p->thread.last_syscall = -1;

	return 0;
}

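/*
 * Called on the exec path once the new image is set up: the saved
 * user pt_regs become the initial register state of the new program,
 * and all FP/vector state is cleared so the image starts with clean
 * registers.
 */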
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
	set_fs(USER_DS);
	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->mq = 0;
	regs->nip = nip;
	regs->gpr[1] = sp;
	regs->msr = MSR_USER;
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

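/*
 * Floating-point exception mode, as manipulated by the
 * PR_SET_FPEXC/PR_GET_FPEXC prctl() calls.  On classic FP hardware
 * the mode maps onto the MSR FE0/FE1 bits; with PR_FP_EXC_SW_ENABLE
 * (SPE only) the individual IEEE exception enables are kept in
 * thread.fpexc_mode instead.
 */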
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		tsk->thread.fpexc_mode = val &
			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
		return -EINVAL;
#endif
	} else {
		/* on a CONFIG_SPE this does not hurt us.  The bits that
		 * __pack_fe01 use do not overlap with bits used for
		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
		 * on CONFIG_SPE implementations are reserved so writing to
		 * them does not change anything */
		if (val > PR_FP_EXC_PRECISE)
			return -EINVAL;
		tsk->thread.fpexc_mode = __pack_fe01(val);
		if (regs != NULL && (regs->msr & MSR_FP) != 0)
			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
				| tsk->thread.fpexc_mode;
	}
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		val = tsk->thread.fpexc_mode;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

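/*
 * System call wrappers.  The wrappers below receive the user's
 * argument registers, and the entry code arranges for a pointer to
 * the saved pt_regs to be passed as the final parameter, which is
 * needed to pick up the child's stack pointer and register state.
 */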
int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}

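/*
 * Check that a candidate stack pointer lies within the task's
 * kernel stack (or, with CONFIG_IRQSTACKS, within this CPU's
 * hard/soft IRQ stacks) with at least nbytes of room below the top.
 */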
static int validate_sp(unsigned long sp, struct task_struct *p,
		       unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)p->thread_info;

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

#ifdef CONFIG_IRQSTACKS
	stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
#endif

	return 0;
}

void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);

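/*
 * Print a backtrace by following the stack back-chain, printing the
 * saved LR of each frame.  When a frame belongs to one of the
 * exception-return paths, the trace continues at the NIP stored in
 * the exception frame.
 */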
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, stack_top, prev_sp, ret;
	int count = 0;
	unsigned long next_exc = 0;
	struct pt_regs *regs;
	extern char ret_from_except, ret_from_except_full, ret_from_syscall;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	prev_sp = (unsigned long) (tsk->thread_info + 1);
	stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
	while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
		if (count == 0) {
			printk("Call trace:");
#ifdef CONFIG_KALLSYMS
			printk("\n");
#endif
		} else {
			if (next_exc) {
				ret = next_exc;
				next_exc = 0;
			} else
				ret = *(unsigned long *)(sp + 4);
			printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s", ret);
			printk("\n");
#endif
			if (ret == (unsigned long) &ret_from_except
			    || ret == (unsigned long) &ret_from_except_full
			    || ret == (unsigned long) &ret_from_syscall) {
				/* sp + 16 points to an exception frame */
				regs = (struct pt_regs *) (sp + 16);
				if (sp + 16 + sizeof(*regs) <= stack_top)
					next_exc = regs->nip;
			}
		}
		++count;
		sp = *(unsigned long *)sp;
	}
#ifndef CONFIG_KALLSYMS
	if (count > 0)
		printk("\n");
#endif
}

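/*
 * Return the address a sleeping task is blocked at, by walking its
 * saved stack frames and reporting the first return address that is
 * not within the scheduler itself; used for /proc wchan reporting.
 */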
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, 16))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, 16))
			return 0;
		if (count > 0) {
			ip = *(unsigned long *)(sp + 4);
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);