/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space;
 * do not walk the page tables directly, use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
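
/*
 * Usage sketch: this is exactly how generic_ptrace_peekdata(), later in
 * this file, reads one word from the tracee; a short copy is an error.
 *
 *	unsigned long tmp;
 *	int copied;
 *
 *	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 *	if (copied != sizeof(tmp))
 *		return -EIO;
 */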

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	rcu_read_lock();
	child->ptracer_cred = get_cred(__task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
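
/*
 * Usage sketch (mirrors the ptrace() syscall at the bottom of this file):
 * a successful check leaves the tracee frozen, so the caller must pair it
 * with ptrace_unfreeze_traced() unless the request detached the tracee.
 *
 *	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
 *				  request == PTRACE_INTERRUPT);
 *	if (ret < 0)
 *		goto out_put_task_struct;
 *	ret = arch_ptrace(child, request, addr, data);
 *	if (ret || request != PTRACE_DETACH)
 *		ptrace_unfreeze_traced(child);
 */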

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
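
/*
 * Usage sketch (hypothetical caller, not from this file): a /proc-style
 * handler exposing sensitive state about @task could gate it like this,
 * using the PTRACE_MODE_READ_FSCREDS combination from <linux/ptrace.h>:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 */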

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
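
/*
 * Illustrative user-space sketch (not kernel code) of the two attach
 * styles handled above.  PTRACE_ATTACH sends SIGSTOP as a side effect;
 * PTRACE_SEIZE requires addr == 0, takes the option flags in 'data' and
 * leaves the tracee running.
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	-- wait for the SIGSTOP trap
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_O_TRACEEXIT);
 *	-- no initial stop; use PTRACE_INTERRUPT to trap on demand
 */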

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
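
/*
 * Illustrative user-space sketch (not kernel code): the classic
 * PTRACE_TRACEME pattern that ends up in the helper above.  The child
 * asks to be traced by its parent and execs; the exec then traps.
 *
 *	if (fork() == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execvp(argv[1], &argv[1]);
 *		_exit(127);
 *	}
 *	waitpid(-1, &status, 0);	-- child stops at exec
 */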

/*
 * Called with irqs disabled, returns true if children should reap
 * themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.  But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in which case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}
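
/*
 * Illustrative user-space sketch (not kernel code): detaching.  The
 * tracee must be in a ptrace stop; 'data' is an optional signal to
 * deliver on resume and must pass the valid_signal() check above.
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, 0);
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)SIGCONT);
 */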

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
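
/*
 * Illustrative user-space sketch (not kernel code): setting options on a
 * stopped tracee.  Any bit outside PTRACE_O_MASK makes the helper above
 * return -EINVAL.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT));
 */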

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
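
/*
 * Illustrative user-space sketch (not kernel code): peeking at pending
 * signals.  'addr' points at the args struct, 'data' at the output
 * buffer; the return value is the number of entries copied.
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 16,
 *	};
 *	siginfo_t infos[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 */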

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)	((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)	0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
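
/*
 * Illustrative user-space sketch (not kernel code): the resume requests
 * served above.  'data' is the signal to deliver on resume, or 0.
 *
 *	ptrace(PTRACE_CONT, pid, NULL, 0);		-- just resume
 *	ptrace(PTRACE_SYSCALL, pid, NULL, 0);		-- stop at next syscall
 *	ptrace(PTRACE_CONT, pid, NULL, (void *)SIGTERM); -- resume with signal
 */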

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
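
/*
 * Illustrative user-space sketch (not kernel code, x86-style): reading
 * the general-purpose registers through the regset interface.  'addr'
 * selects the regset by its NT_* core note type, 'data' points to an
 * iovec; on return iov_len holds the number of bytes actually filled.
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */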

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
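
/*
 * Illustrative user-space sketch (not kernel code): the word-at-a-time
 * interface served by the two helpers above.  PEEKDATA returns the word
 * as the syscall's return value, so errno disambiguates a -1 word.
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		-- real error, not a word that happens to be -1
 *	ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)word);
 */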

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */