/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
static int ptrace_trapping_sleep_fn(void *flags)
{
	schedule();
	return 0;
}
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
static bool ptrace_has_cap(const struct cred *tcred, unsigned int mode)
{
	struct user_namespace *tns = tcred->user_ns;

	/* When a root-owned process enters a user namespace created by a
	 * malicious user, the user shouldn't be able to execute code under
	 * uid 0 by attaching to the root-owned process via ptrace.
	 * Therefore, similar to the capable_wrt_inode_uidgid() check,
	 * verify that all the uids and gids of the target process are
	 * mapped into a namespace below the current one in which the caller
	 * is capable.
	 * No fsuid/fsgid check because __ptrace_may_access doesn't do it
	 * either.
	 */
	if (!kuid_has_mapping(tns, tcred->euid) ||
	    !kuid_has_mapping(tns, tcred->suid) ||
	    !kuid_has_mapping(tns, tcred->uid)  ||
	    !kgid_has_mapping(tns, tcred->egid) ||
	    !kgid_has_mapping(tns, tcred->sgid) ||
	    !kgid_has_mapping(tns, tcred->gid)) {
		return false;
	}

	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, tns, CAP_SYS_PTRACE);

	return has_ns_capability(current, tns, CAP_SYS_PTRACE);
}
/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	kuid_t caller_uid;
	kgid_t caller_gid;
	int dumpable = 0;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(__task_cred(task), mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src,
		    char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src,
		     unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(is_compat_task())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;
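	/*
	 * Illustration (userspace sketch, not part of this file): INTERRUPT
	 * and LISTEN only work on a SEIZE'd tracee, e.g. to observe group
	 * stop state changes without keeping the tracee trapped:
	 *
	 *	ptrace(PTRACE_SEIZE, pid, 0, 0);
	 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);
	 *	waitpid(pid, &status, 0);        PTRACE_EVENT_STOP trap
	 *	ptrace(PTRACE_LISTEN, pid, 0, 0);
	 *	waitpid(pid, &status, 0);        re-trap on async event
	 */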
	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
		return 0;

	return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
		flush_ptrace_hw_breakpoint(tsk);
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */