ptrace: Collapse ptrace_untrace() into __ptrace_unlink()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / ptrace.c
CommitLineData
1da177e4
LT
1/*
2 * linux/kernel/ptrace.c
3 *
4 * (C) Copyright 1999 Linus Torvalds
5 *
6 * Common interfaces for "ptrace()" which we do not want
7 * to continually duplicate across every architecture.
8 */
9
c59ede7b 10#include <linux/capability.h>
1da177e4
LT
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <linux/highmem.h>
16#include <linux/pagemap.h>
1da177e4
LT
17#include <linux/ptrace.h>
18#include <linux/security.h>
7ed20e1a 19#include <linux/signal.h>
a5cb013d 20#include <linux/audit.h>
b488893a 21#include <linux/pid_namespace.h>
f17d30a8 22#include <linux/syscalls.h>
3a709703 23#include <linux/uaccess.h>
2225a122 24#include <linux/regset.h>
1da177e4 25
bf53de90 26
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	/* A task may sit on at most one ptraced list at a time. */
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	/* ->real_parent is left untouched; only the reparenting link moves. */
	child->parent = new_parent;
}
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and clear all the ptrace state (->ptrace is zeroed).
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If group stop is completed or in progress, it should
		 * participate in the group stop.  Set GROUP_STOP_PENDING
		 * before kicking it.
		 *
		 * This involves TRACED -> RUNNING -> STOPPED transition
		 * which is similar to but in the opposite direction of
		 * what happens while attaching to a stopped task.
		 * However, in this direction, the intermediate RUNNING
		 * state is not hidden even from the current ptracer and if
		 * it immediately re-attaches and performs a WNOHANG
		 * wait(2), it may fail.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			child->group_stop |= GROUP_STOP_PENDING;
		/* Kick the tracee out of TASK_TRACED (resume=1). */
		signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}
/*
 * Check that we have indeed attached to the thing..
 *
 * @child: supposed tracee
 * @kill:  non-zero for PTRACE_KILL, which must not wait for a
 *         quiescent (TASK_TRACED) tracee
 *
 * Returns 0 when @child is our tracee and (unless @kill) has settled
 * into TASK_TRACED; -ESRCH otherwise.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			/* Promote group-stop STOPPED to TRACED for the tracer. */
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		/* Wait until the tracee is really off-CPU in TASK_TRACED. */
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}
116
006ebb40 117int __ptrace_may_access(struct task_struct *task, unsigned int mode)
ab8d11be 118{
c69e8d9c 119 const struct cred *cred = current_cred(), *tcred;
b6dff3ec 120
df26c40e
EB
121 /* May we inspect the given task?
122 * This check is used both for attaching with ptrace
123 * and for allowing access to sensitive information in /proc.
124 *
125 * ptrace_attach denies several cases that /proc allows
126 * because setting up the necessary parent/child relationship
127 * or halting the specified task is impossible.
128 */
129 int dumpable = 0;
130 /* Don't let security modules deny introspection */
131 if (task == current)
132 return 0;
c69e8d9c
DH
133 rcu_read_lock();
134 tcred = __task_cred(task);
135 if ((cred->uid != tcred->euid ||
136 cred->uid != tcred->suid ||
137 cred->uid != tcred->uid ||
138 cred->gid != tcred->egid ||
139 cred->gid != tcred->sgid ||
140 cred->gid != tcred->gid) &&
141 !capable(CAP_SYS_PTRACE)) {
142 rcu_read_unlock();
ab8d11be 143 return -EPERM;
c69e8d9c
DH
144 }
145 rcu_read_unlock();
ab8d11be 146 smp_rmb();
df26c40e 147 if (task->mm)
6c5d5238 148 dumpable = get_dumpable(task->mm);
df26c40e 149 if (!dumpable && !capable(CAP_SYS_PTRACE))
ab8d11be
MS
150 return -EPERM;
151
9e48858f 152 return security_ptrace_access_check(task, mode);
ab8d11be
MS
153}
154
006ebb40 155bool ptrace_may_access(struct task_struct *task, unsigned int mode)
ab8d11be
MS
156{
157 int err;
158 task_lock(task);
006ebb40 159 err = __ptrace_may_access(task, mode);
ab8d11be 160 task_unlock(task);
3a709703 161 return !err;
ab8d11be
MS
162}
163
e3e89cc5 164static int ptrace_attach(struct task_struct *task)
1da177e4 165{
d79fdd6d 166 bool wait_trap = false;
1da177e4 167 int retval;
f5b40e36 168
a5cb013d
AV
169 audit_ptrace(task);
170
1da177e4 171 retval = -EPERM;
b79b7ba9
ON
172 if (unlikely(task->flags & PF_KTHREAD))
173 goto out;
bac0abd6 174 if (same_thread_group(task, current))
f5b40e36
LT
175 goto out;
176
f2f0b00a
ON
177 /*
178 * Protect exec's credential calculations against our interference;
5e751e99
DH
179 * interference; SUID, SGID and LSM creds get determined differently
180 * under ptrace.
d84f4f99 181 */
793285fc 182 retval = -ERESTARTNOINTR;
9b1bf12d 183 if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
d84f4f99 184 goto out;
f5b40e36 185
4b105cbb 186 task_lock(task);
006ebb40 187 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
4b105cbb 188 task_unlock(task);
1da177e4 189 if (retval)
4b105cbb 190 goto unlock_creds;
1da177e4 191
4b105cbb 192 write_lock_irq(&tasklist_lock);
b79b7ba9
ON
193 retval = -EPERM;
194 if (unlikely(task->exit_state))
4b105cbb 195 goto unlock_tasklist;
f2f0b00a 196 if (task->ptrace)
4b105cbb 197 goto unlock_tasklist;
b79b7ba9 198
f2f0b00a 199 task->ptrace = PT_PTRACED;
1da177e4
LT
200 if (capable(CAP_SYS_PTRACE))
201 task->ptrace |= PT_PTRACE_CAP;
1da177e4 202
1da177e4 203 __ptrace_link(task, current);
33e9fc7d 204 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
b79b7ba9 205
d79fdd6d
TH
206 spin_lock(&task->sighand->siglock);
207
208 /*
209 * If the task is already STOPPED, set GROUP_STOP_PENDING and
210 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
211 * will be cleared if the child completes the transition or any
212 * event which clears the group stop states happens. We'll wait
213 * for the transition to complete before returning from this
214 * function.
215 *
216 * This hides STOPPED -> RUNNING -> TRACED transition from the
217 * attaching thread but a different thread in the same group can
218 * still observe the transient RUNNING state. IOW, if another
219 * thread's WNOHANG wait(2) on the stopped tracee races against
220 * ATTACH, the wait(2) may fail due to the transient RUNNING.
221 *
222 * The following task_is_stopped() test is safe as both transitions
223 * in and out of STOPPED are protected by siglock.
224 */
225 if (task_is_stopped(task)) {
226 task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
227 signal_wake_up(task, 1);
228 wait_trap = true;
229 }
230
231 spin_unlock(&task->sighand->siglock);
232
b79b7ba9 233 retval = 0;
4b105cbb
ON
234unlock_tasklist:
235 write_unlock_irq(&tasklist_lock);
236unlock_creds:
9b1bf12d 237 mutex_unlock(&task->signal->cred_guard_mutex);
f5b40e36 238out:
d79fdd6d
TH
239 if (wait_trap)
240 wait_event(current->signal->wait_chldexit,
241 !(task->group_stop & GROUP_STOP_TRAPPING));
1da177e4
LT
242 return retval;
243}
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED, making the real parent the tracer.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 *
 * Returns 0 on success, -EPERM if already traced or denied by the LSM.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
273
39c626ae
ON
274/*
275 * Called with irqs disabled, returns true if childs should reap themselves.
276 */
277static int ignoring_children(struct sighand_struct *sigh)
278{
279 int ret;
280 spin_lock(&sigh->siglock);
281 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
282 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
283 spin_unlock(&sigh->siglock);
284 return ret;
285}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				/* Deliver the notification ptrace suppressed. */
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				/* exit_signal == -1 marks it self-reaping. */
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}
324
e3e89cc5 325static int ptrace_detach(struct task_struct *child, unsigned int data)
1da177e4 326{
39c626ae 327 bool dead = false;
4576145c 328
7ed20e1a 329 if (!valid_signal(data))
5ecfbae0 330 return -EIO;
1da177e4
LT
331
332 /* Architecture-specific hardware disable .. */
333 ptrace_disable(child);
7d941432 334 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1da177e4 335
95c3eb76 336 write_lock_irq(&tasklist_lock);
39c626ae
ON
337 /*
338 * This child can be already killed. Make sure de_thread() or
339 * our sub-thread doing do_wait() didn't do release_task() yet.
340 */
95c3eb76
ON
341 if (child->ptrace) {
342 child->exit_code = data;
4576145c 343 dead = __ptrace_detach(current, child);
95c3eb76 344 }
1da177e4
LT
345 write_unlock_irq(&tasklist_lock);
346
4576145c
ON
347 if (unlikely(dead))
348 release_task(child);
349
1da177e4
LT
350 return 0;
351}
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	/* Collect tracees that need reaping; the rest are just unlinked. */
	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	/* release_task() must run without tasklist_lock. */
	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}
383
1da177e4
LT
384int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
385{
386 int copied = 0;
387
388 while (len > 0) {
389 char buf[128];
390 int this_len, retval;
391
392 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
393 retval = access_process_vm(tsk, src, buf, this_len, 0);
394 if (!retval) {
395 if (copied)
396 break;
397 return -EIO;
398 }
399 if (copy_to_user(dst, buf, retval))
400 return -EFAULT;
401 copied += retval;
402 src += retval;
403 dst += retval;
3a709703 404 len -= retval;
1da177e4
LT
405 }
406 return copied;
407}
408
409int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
410{
411 int copied = 0;
412
413 while (len > 0) {
414 char buf[128];
415 int this_len, retval;
416
417 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
418 if (copy_from_user(buf, src, this_len))
419 return -EFAULT;
420 retval = access_process_vm(tsk, dst, buf, this_len, 1);
421 if (!retval) {
422 if (copied)
423 break;
424 return -EIO;
425 }
426 copied += retval;
427 src += retval;
428 dst += retval;
3a709703 429 len -= retval;
1da177e4
LT
430 }
431 return copied;
432}
433
4abf9869 434static int ptrace_setoptions(struct task_struct *child, unsigned long data)
1da177e4
LT
435{
436 child->ptrace &= ~PT_TRACE_MASK;
437
438 if (data & PTRACE_O_TRACESYSGOOD)
439 child->ptrace |= PT_TRACESYSGOOD;
440
441 if (data & PTRACE_O_TRACEFORK)
442 child->ptrace |= PT_TRACE_FORK;
443
444 if (data & PTRACE_O_TRACEVFORK)
445 child->ptrace |= PT_TRACE_VFORK;
446
447 if (data & PTRACE_O_TRACECLONE)
448 child->ptrace |= PT_TRACE_CLONE;
449
450 if (data & PTRACE_O_TRACEEXEC)
451 child->ptrace |= PT_TRACE_EXEC;
452
453 if (data & PTRACE_O_TRACEVFORKDONE)
454 child->ptrace |= PT_TRACE_VFORK_DONE;
455
456 if (data & PTRACE_O_TRACEEXIT)
457 child->ptrace |= PT_TRACE_EXIT;
458
459 return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
460}
461
e16b2781 462static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
1da177e4 463{
e4961254 464 unsigned long flags;
1da177e4
LT
465 int error = -ESRCH;
466
e4961254 467 if (lock_task_sighand(child, &flags)) {
1da177e4 468 error = -EINVAL;
1da177e4 469 if (likely(child->last_siginfo != NULL)) {
e16b2781 470 *info = *child->last_siginfo;
1da177e4
LT
471 error = 0;
472 }
e4961254 473 unlock_task_sighand(child, &flags);
1da177e4 474 }
1da177e4
LT
475 return error;
476}
477
e16b2781 478static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
1da177e4 479{
e4961254 480 unsigned long flags;
1da177e4
LT
481 int error = -ESRCH;
482
e4961254 483 if (lock_task_sighand(child, &flags)) {
1da177e4 484 error = -EINVAL;
1da177e4 485 if (likely(child->last_siginfo != NULL)) {
e16b2781 486 *child->last_siginfo = *info;
1da177e4
LT
487 error = 0;
488 }
e4961254 489 unlock_task_sighand(child, &flags);
1da177e4 490 }
1da177e4
LT
491 return error;
492}
/*
 * Per-architecture resume-request classifiers.  An architecture that
 * does not define the corresponding PTRACE_* request gets a classifier
 * that is constant false, so ptrace_resume() compiles everywhere.
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
/*
 * Restart the stopped tracee for one of the resuming requests
 * (PTRACE_CONT/SYSCALL/SINGLESTEP/SINGLEBLOCK/SYSEMU*), arming the
 * matching syscall-trace / single-step machinery first.
 *
 * @data is the signal to deliver on resume (0 for none).
 * Returns 0 on success, -EIO for a bad signal number or an
 * unsupported stepping mode on this architecture.
 */
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/* exit_code carries the signal to deliver when the tracee resumes. */
	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
548
2225a122
SS
549#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
550
551static const struct user_regset *
552find_regset(const struct user_regset_view *view, unsigned int type)
553{
554 const struct user_regset *regset;
555 int n;
556
557 for (n = 0; n < view->n; ++n) {
558 regset = view->regsets + n;
559 if (regset->core_note_type == type)
560 return regset;
561 }
562
563 return NULL;
564}
/*
 * Common worker for PTRACE_GETREGSET / PTRACE_SETREGSET.
 *
 * @type is the NT_* core note type selecting the regset; @kiov
 * describes the user buffer and is clamped (iov_len) to the regset
 * size before the copy.
 *
 * Returns 0 on success, -EINVAL for an unknown regset type or a
 * buffer length that is not a multiple of the regset's unit size,
 * or the error from the regset copy helpers.
 */
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
587
588#endif
589
/*
 * Generic handler for the architecture-independent ptrace requests.
 * Architectures call this from arch_ptrace() for requests they do not
 * handle themselves.
 *
 * @addr and @data are interpreted per request; for most requests @data
 * is a user-space pointer (datavp/datalp below).
 * Returns a request-specific value or a negative errno (-EIO for
 * unknown requests).
 */
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	siginfo_t siginfo;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		/* Report the FDPIC load map address selected by @addr. */
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			/* Report the (possibly clamped) length back. */
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
481bed45 704
8053bdd5 705static struct task_struct *ptrace_get_task_struct(pid_t pid)
6b9c7ed8
CH
706{
707 struct task_struct *child;
481bed45 708
8053bdd5 709 rcu_read_lock();
228ebcbe 710 child = find_task_by_vpid(pid);
481bed45
CH
711 if (child)
712 get_task_struct(child);
8053bdd5 713 rcu_read_unlock();
f400e198 714
481bed45 715 if (!child)
6b9c7ed8
CH
716 return ERR_PTR(-ESRCH);
717 return child;
481bed45
CH
718}
719
/*
 * Architectures that need post-attach book-keeping define their own
 * arch_ptrace_attach(); everyone else gets a no-op.
 */
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif
723
/*
 * The ptrace(2) system call: dispatch TRACEME/ATTACH here, verify the
 * attachment for everything else, then hand off to arch_ptrace().
 */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	/* PTRACE_KILL must not wait for the tracee to stop. */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
76647323 765
4abf9869
NK
766int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
767 unsigned long data)
76647323
AD
768{
769 unsigned long tmp;
770 int copied;
771
772 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
773 if (copied != sizeof(tmp))
774 return -EIO;
775 return put_user(tmp, (unsigned long __user *)data);
776}
f284ce72 777
4abf9869
NK
778int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
779 unsigned long data)
f284ce72
AD
780{
781 int copied;
782
783 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
784 return (copied == sizeof(data)) ? 0 : -EIO;
785}
032d82d9 786
96b8936a 787#if defined CONFIG_COMPAT
032d82d9
RM
788#include <linux/compat.h>
789
/*
 * Compat counterpart of ptrace_request(): handles the requests whose
 * user-space data layout differs for 32-bit tracers (word size,
 * siginfo, iovec), and falls through to ptrace_request() for the rest.
 */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Transfer a 32-bit word, not the native word size. */
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		/* Widen the 32-bit iovec into a native one. */
		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
c269f196 867
c269f196
RM
/*
 * 32-bit entry point for ptrace(2): mirrors sys_ptrace() but dispatches
 * to compat_arch_ptrace() for attached-tracee requests.
 */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	/* PTRACE_KILL must not wait for the tracee to stop. */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
96b8936a 905#endif /* CONFIG_COMPAT */