/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
27 #include <linux/smp.h>
28 #include <linux/ptrace.h>
29 #include <linux/user.h>
30 #include <linux/seccomp.h>
31 #include <linux/security.h>
32 #include <linux/init.h>
33 #include <linux/signal.h>
34 #include <linux/uaccess.h>
35 #include <linux/perf_event.h>
36 #include <linux/hw_breakpoint.h>
37 #include <linux/regset.h>
38 #include <linux/tracehook.h>
39 #include <linux/elf.h>
41 #include <asm/compat.h>
42 #include <asm/debug-monitors.h>
43 #include <asm/pgtable.h>
44 #include <asm/syscall.h>
45 #include <asm/traps.h>
46 #include <asm/system_misc.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/syscalls.h>
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Nothing to do on arm64: all per-task debug state is torn down via the
 * regset/hw-breakpoint paths.
 */
void ptrace_disable(struct task_struct *child)
{
}
63 #ifdef CONFIG_HAVE_HW_BREAKPOINT
65 * Handle hitting a HW-breakpoint.
67 static void ptrace_hbptriggered(struct perf_event
*bp
,
68 struct perf_sample_data
*data
,
71 struct arch_hw_breakpoint
*bkpt
= counter_arch_bp(bp
);
75 .si_code
= TRAP_HWBKPT
,
76 .si_addr
= (void __user
*)(bkpt
->trigger
),
82 if (!is_compat_task())
85 for (i
= 0; i
< ARM_MAX_BRP
; ++i
) {
86 if (current
->thread
.debug
.hbp_break
[i
] == bp
) {
87 info
.si_errno
= (i
<< 1) + 1;
92 for (i
= 0; i
< ARM_MAX_WRP
; ++i
) {
93 if (current
->thread
.debug
.hbp_watch
[i
] == bp
) {
94 info
.si_errno
= -((i
<< 1) + 1);
101 force_sig_info(SIGTRAP
, &info
, current
);
105 * Unregister breakpoints from this task and reset the pointers in
108 void flush_ptrace_hw_breakpoint(struct task_struct
*tsk
)
111 struct thread_struct
*t
= &tsk
->thread
;
113 for (i
= 0; i
< ARM_MAX_BRP
; i
++) {
114 if (t
->debug
.hbp_break
[i
]) {
115 unregister_hw_breakpoint(t
->debug
.hbp_break
[i
]);
116 t
->debug
.hbp_break
[i
] = NULL
;
120 for (i
= 0; i
< ARM_MAX_WRP
; i
++) {
121 if (t
->debug
.hbp_watch
[i
]) {
122 unregister_hw_breakpoint(t
->debug
.hbp_watch
[i
]);
123 t
->debug
.hbp_watch
[i
] = NULL
;
128 void ptrace_hw_copy_thread(struct task_struct
*tsk
)
130 memset(&tsk
->thread
.debug
, 0, sizeof(struct debug_info
));
133 static struct perf_event
*ptrace_hbp_get_event(unsigned int note_type
,
134 struct task_struct
*tsk
,
137 struct perf_event
*bp
= ERR_PTR(-EINVAL
);
140 case NT_ARM_HW_BREAK
:
141 if (idx
< ARM_MAX_BRP
)
142 bp
= tsk
->thread
.debug
.hbp_break
[idx
];
144 case NT_ARM_HW_WATCH
:
145 if (idx
< ARM_MAX_WRP
)
146 bp
= tsk
->thread
.debug
.hbp_watch
[idx
];
153 static int ptrace_hbp_set_event(unsigned int note_type
,
154 struct task_struct
*tsk
,
156 struct perf_event
*bp
)
161 case NT_ARM_HW_BREAK
:
162 if (idx
< ARM_MAX_BRP
) {
163 tsk
->thread
.debug
.hbp_break
[idx
] = bp
;
167 case NT_ARM_HW_WATCH
:
168 if (idx
< ARM_MAX_WRP
) {
169 tsk
->thread
.debug
.hbp_watch
[idx
] = bp
;
178 static struct perf_event
*ptrace_hbp_create(unsigned int note_type
,
179 struct task_struct
*tsk
,
182 struct perf_event
*bp
;
183 struct perf_event_attr attr
;
187 case NT_ARM_HW_BREAK
:
188 type
= HW_BREAKPOINT_X
;
190 case NT_ARM_HW_WATCH
:
191 type
= HW_BREAKPOINT_RW
;
194 return ERR_PTR(-EINVAL
);
197 ptrace_breakpoint_init(&attr
);
200 * Initialise fields to sane defaults
201 * (i.e. values that will pass validation).
204 attr
.bp_len
= HW_BREAKPOINT_LEN_4
;
208 bp
= register_user_hw_breakpoint(&attr
, ptrace_hbptriggered
, NULL
, tsk
);
212 err
= ptrace_hbp_set_event(note_type
, tsk
, idx
, bp
);
219 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type
,
220 struct arch_hw_breakpoint_ctrl ctrl
,
221 struct perf_event_attr
*attr
)
223 int err
, len
, type
, disabled
= !ctrl
.enabled
;
225 attr
->disabled
= disabled
;
229 err
= arch_bp_generic_fields(ctrl
, &len
, &type
);
234 case NT_ARM_HW_BREAK
:
235 if ((type
& HW_BREAKPOINT_X
) != type
)
238 case NT_ARM_HW_WATCH
:
239 if ((type
& HW_BREAKPOINT_RW
) != type
)
247 attr
->bp_type
= type
;
252 static int ptrace_hbp_get_resource_info(unsigned int note_type
, u32
*info
)
258 case NT_ARM_HW_BREAK
:
259 num
= hw_breakpoint_slots(TYPE_INST
);
261 case NT_ARM_HW_WATCH
:
262 num
= hw_breakpoint_slots(TYPE_DATA
);
268 reg
|= debug_monitors_arch();
276 static int ptrace_hbp_get_ctrl(unsigned int note_type
,
277 struct task_struct
*tsk
,
281 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
286 *ctrl
= bp
? encode_ctrl_reg(counter_arch_bp(bp
)->ctrl
) : 0;
290 static int ptrace_hbp_get_addr(unsigned int note_type
,
291 struct task_struct
*tsk
,
295 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
300 *addr
= bp
? bp
->attr
.bp_addr
: 0;
/*
 * Fetch the event for slot @idx, creating a fresh (disabled) one on first
 * use.  Returns the event or an ERR_PTR.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
316 static int ptrace_hbp_set_ctrl(unsigned int note_type
,
317 struct task_struct
*tsk
,
322 struct perf_event
*bp
;
323 struct perf_event_attr attr
;
324 struct arch_hw_breakpoint_ctrl ctrl
;
326 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
333 decode_ctrl_reg(uctrl
, &ctrl
);
334 err
= ptrace_hbp_fill_attr_ctrl(note_type
, ctrl
, &attr
);
338 return modify_user_hw_breakpoint(bp
, &attr
);
341 static int ptrace_hbp_set_addr(unsigned int note_type
,
342 struct task_struct
*tsk
,
347 struct perf_event
*bp
;
348 struct perf_event_attr attr
;
350 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
358 err
= modify_user_hw_breakpoint(bp
, &attr
);
362 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
363 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
364 #define PTRACE_HBP_PAD_SZ sizeof(u32)
366 static int hw_break_get(struct task_struct
*target
,
367 const struct user_regset
*regset
,
368 unsigned int pos
, unsigned int count
,
369 void *kbuf
, void __user
*ubuf
)
371 unsigned int note_type
= regset
->core_note_type
;
372 int ret
, idx
= 0, offset
, limit
;
377 ret
= ptrace_hbp_get_resource_info(note_type
, &info
);
381 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &info
, 0,
387 offset
= offsetof(struct user_hwdebug_state
, pad
);
388 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
, offset
,
389 offset
+ PTRACE_HBP_PAD_SZ
);
393 /* (address, ctrl) registers */
394 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
395 limit
= regset
->n
* regset
->size
;
396 while (count
&& offset
< limit
) {
397 ret
= ptrace_hbp_get_addr(note_type
, target
, idx
, &addr
);
400 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
401 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
404 offset
+= PTRACE_HBP_ADDR_SZ
;
406 ret
= ptrace_hbp_get_ctrl(note_type
, target
, idx
, &ctrl
);
409 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
410 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
413 offset
+= PTRACE_HBP_CTRL_SZ
;
415 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
417 offset
+ PTRACE_HBP_PAD_SZ
);
420 offset
+= PTRACE_HBP_PAD_SZ
;
427 static int hw_break_set(struct task_struct
*target
,
428 const struct user_regset
*regset
,
429 unsigned int pos
, unsigned int count
,
430 const void *kbuf
, const void __user
*ubuf
)
432 unsigned int note_type
= regset
->core_note_type
;
433 int ret
, idx
= 0, offset
, limit
;
437 /* Resource info and pad */
438 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
439 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
, 0, offset
);
443 /* (address, ctrl) registers */
444 limit
= regset
->n
* regset
->size
;
445 while (count
&& offset
< limit
) {
446 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
447 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
450 ret
= ptrace_hbp_set_addr(note_type
, target
, idx
, addr
);
453 offset
+= PTRACE_HBP_ADDR_SZ
;
455 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
456 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
459 ret
= ptrace_hbp_set_ctrl(note_type
, target
, idx
, ctrl
);
462 offset
+= PTRACE_HBP_CTRL_SZ
;
464 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
466 offset
+ PTRACE_HBP_PAD_SZ
);
469 offset
+= PTRACE_HBP_PAD_SZ
;
475 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
477 static int gpr_get(struct task_struct
*target
,
478 const struct user_regset
*regset
,
479 unsigned int pos
, unsigned int count
,
480 void *kbuf
, void __user
*ubuf
)
482 struct user_pt_regs
*uregs
= &task_pt_regs(target
)->user_regs
;
483 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
486 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
487 unsigned int pos
, unsigned int count
,
488 const void *kbuf
, const void __user
*ubuf
)
491 struct user_pt_regs newregs
;
493 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newregs
, 0, -1);
497 if (!valid_user_regs(&newregs
))
500 task_pt_regs(target
)->user_regs
= newregs
;
505 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
507 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
508 unsigned int pos
, unsigned int count
,
509 void *kbuf
, void __user
*ubuf
)
511 struct user_fpsimd_state
*uregs
;
512 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
513 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
516 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
517 unsigned int pos
, unsigned int count
,
518 const void *kbuf
, const void __user
*ubuf
)
521 struct user_fpsimd_state newstate
;
523 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newstate
, 0, -1);
527 target
->thread
.fpsimd_state
.user_fpsimd
= newstate
;
528 fpsimd_flush_task_state(target
);
532 static int tls_get(struct task_struct
*target
, const struct user_regset
*regset
,
533 unsigned int pos
, unsigned int count
,
534 void *kbuf
, void __user
*ubuf
)
536 unsigned long *tls
= &target
->thread
.tp_value
;
537 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, tls
, 0, -1);
540 static int tls_set(struct task_struct
*target
, const struct user_regset
*regset
,
541 unsigned int pos
, unsigned int count
,
542 const void *kbuf
, const void __user
*ubuf
)
547 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
551 target
->thread
.tp_value
= tls
;
/* Indices into aarch64_regsets[]. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
565 static const struct user_regset aarch64_regsets
[] = {
567 .core_note_type
= NT_PRSTATUS
,
568 .n
= sizeof(struct user_pt_regs
) / sizeof(u64
),
570 .align
= sizeof(u64
),
575 .core_note_type
= NT_PRFPREG
,
576 .n
= sizeof(struct user_fpsimd_state
) / sizeof(u32
),
578 * We pretend we have 32-bit registers because the fpsr and
579 * fpcr are 32-bits wide.
582 .align
= sizeof(u32
),
587 .core_note_type
= NT_ARM_TLS
,
589 .size
= sizeof(void *),
590 .align
= sizeof(void *),
594 #ifdef CONFIG_HAVE_HW_BREAKPOINT
595 [REGSET_HW_BREAK
] = {
596 .core_note_type
= NT_ARM_HW_BREAK
,
597 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
599 .align
= sizeof(u32
),
603 [REGSET_HW_WATCH
] = {
604 .core_note_type
= NT_ARM_HW_WATCH
,
605 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
607 .align
= sizeof(u32
),
614 static const struct user_regset_view user_aarch64_view
= {
615 .name
= "aarch64", .e_machine
= EM_AARCH64
,
616 .regsets
= aarch64_regsets
, .n
= ARRAY_SIZE(aarch64_regsets
)
620 #include <linux/compat.h>
627 static int compat_gpr_get(struct task_struct
*target
,
628 const struct user_regset
*regset
,
629 unsigned int pos
, unsigned int count
,
630 void *kbuf
, void __user
*ubuf
)
633 unsigned int i
, start
, num_regs
;
635 /* Calculate the number of AArch32 registers contained in count */
636 num_regs
= count
/ regset
->size
;
638 /* Convert pos into an register number */
639 start
= pos
/ regset
->size
;
641 if (start
+ num_regs
> regset
->n
)
644 for (i
= 0; i
< num_regs
; ++i
) {
645 unsigned int idx
= start
+ i
;
650 reg
= (void *)&task_pt_regs(target
)->pc
;
653 reg
= (void *)&task_pt_regs(target
)->pstate
;
656 reg
= (void *)&task_pt_regs(target
)->orig_x0
;
659 reg
= (void *)&task_pt_regs(target
)->regs
[idx
];
663 if (i
== 0 && NULL
!= target
&& target
->pid
== current
->pid
)
664 printk(KERN_WARNING
"coredump(%d) copy registers to kbuf\n", current
->pid
);
665 memcpy(kbuf
, reg
, sizeof(compat_ulong_t
));
666 kbuf
+= sizeof(compat_ulong_t
);
669 ret
= copy_to_user(ubuf
, reg
, sizeof(compat_ulong_t
));
674 ubuf
+= sizeof(compat_ulong_t
);
681 static int compat_gpr_set(struct task_struct
*target
,
682 const struct user_regset
*regset
,
683 unsigned int pos
, unsigned int count
,
684 const void *kbuf
, const void __user
*ubuf
)
686 struct pt_regs newregs
;
688 unsigned int i
, start
, num_regs
;
690 /* Calculate the number of AArch32 registers contained in count */
691 num_regs
= count
/ regset
->size
;
693 /* Convert pos into an register number */
694 start
= pos
/ regset
->size
;
696 if (start
+ num_regs
> regset
->n
)
699 newregs
= *task_pt_regs(target
);
701 for (i
= 0; i
< num_regs
; ++i
) {
702 unsigned int idx
= start
+ i
;
707 reg
= (void *)&newregs
.pc
;
710 reg
= (void *)&newregs
.pstate
;
713 reg
= (void *)&newregs
.orig_x0
;
716 reg
= (void *)&newregs
.regs
[idx
];
719 ret
= copy_from_user(reg
, ubuf
, sizeof(compat_ulong_t
));
724 ubuf
+= sizeof(compat_ulong_t
);
727 if (valid_user_regs(&newregs
.user_regs
))
728 *task_pt_regs(target
) = newregs
;
736 static int compat_vfp_get(struct task_struct
*target
,
737 const struct user_regset
*regset
,
738 unsigned int pos
, unsigned int count
,
739 void *kbuf
, void __user
*ubuf
)
741 struct user_fpsimd_state
*uregs
;
742 compat_ulong_t fpscr
;
745 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
748 * The VFP registers are packed into the fpsimd_state, so they all sit
749 * nicely together for us. We just need to create the fpscr separately.
751 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
752 VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
755 fpscr
= (uregs
->fpsr
& VFP_FPSCR_STAT_MASK
) |
756 (uregs
->fpcr
& VFP_FPSCR_CTRL_MASK
);
757 ret
= put_user(fpscr
, (compat_ulong_t
*)ubuf
);
763 static int compat_vfp_set(struct task_struct
*target
,
764 const struct user_regset
*regset
,
765 unsigned int pos
, unsigned int count
,
766 const void *kbuf
, const void __user
*ubuf
)
768 struct user_fpsimd_state
*uregs
;
769 compat_ulong_t fpscr
;
772 if (pos
+ count
> VFP_STATE_SIZE
)
775 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
777 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
778 VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
781 ret
= get_user(fpscr
, (compat_ulong_t
*)ubuf
);
782 uregs
->fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
783 uregs
->fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
786 fpsimd_flush_task_state(target
);
790 static const struct user_regset aarch32_regsets
[] = {
791 [REGSET_COMPAT_GPR
] = {
792 .core_note_type
= NT_PRSTATUS
,
793 .n
= COMPAT_ELF_NGREG
,
794 .size
= sizeof(compat_elf_greg_t
),
795 .align
= sizeof(compat_elf_greg_t
),
796 .get
= compat_gpr_get
,
797 .set
= compat_gpr_set
799 [REGSET_COMPAT_VFP
] = {
800 .core_note_type
= NT_ARM_VFP
,
801 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
802 .size
= sizeof(compat_ulong_t
),
803 .align
= sizeof(compat_ulong_t
),
804 .get
= compat_vfp_get
,
805 .set
= compat_vfp_set
809 static const struct user_regset_view user_aarch32_view
= {
810 .name
= "aarch32", .e_machine
= EM_ARM
,
811 .regsets
= aarch32_regsets
, .n
= ARRAY_SIZE(aarch32_regsets
)
814 static int compat_ptrace_read_user(struct task_struct
*tsk
, compat_ulong_t off
,
815 compat_ulong_t __user
*ret
)
822 if (off
== COMPAT_PT_TEXT_ADDR
)
823 tmp
= tsk
->mm
->start_code
;
824 else if (off
== COMPAT_PT_DATA_ADDR
)
825 tmp
= tsk
->mm
->start_data
;
826 else if (off
== COMPAT_PT_TEXT_END_ADDR
)
827 tmp
= tsk
->mm
->end_code
;
828 else if (off
< sizeof(compat_elf_gregset_t
))
829 return copy_regset_to_user(tsk
, &user_aarch32_view
,
830 REGSET_COMPAT_GPR
, off
,
831 sizeof(compat_ulong_t
), ret
);
832 else if (off
>= COMPAT_USER_SZ
)
837 return put_user(tmp
, ret
);
840 static int compat_ptrace_write_user(struct task_struct
*tsk
, compat_ulong_t off
,
844 mm_segment_t old_fs
= get_fs();
846 if (off
& 3 || off
>= COMPAT_USER_SZ
)
849 if (off
>= sizeof(compat_elf_gregset_t
))
853 ret
= copy_regset_from_user(tsk
, &user_aarch32_view
,
854 REGSET_COMPAT_GPR
, off
,
855 sizeof(compat_ulong_t
),
862 #ifdef CONFIG_HAVE_HW_BREAKPOINT
865 * Convert a virtual register number into an index for a thread_info
866 * breakpoint array. Breakpoints are identified using positive numbers
867 * whilst watchpoints are negative. The registers are laid out as pairs
868 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
869 * Register 0 is reserved for describing resource information.
871 static int compat_ptrace_hbp_num_to_idx(compat_long_t num
)
873 return (abs(num
) - 1) >> 1;
876 static int compat_ptrace_hbp_get_resource_info(u32
*kdata
)
878 u8 num_brps
, num_wrps
, debug_arch
, wp_len
;
881 num_brps
= hw_breakpoint_slots(TYPE_INST
);
882 num_wrps
= hw_breakpoint_slots(TYPE_DATA
);
884 debug_arch
= debug_monitors_arch();
898 static int compat_ptrace_hbp_get(unsigned int note_type
,
899 struct task_struct
*tsk
,
906 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);;
909 err
= ptrace_hbp_get_addr(note_type
, tsk
, idx
, &addr
);
912 err
= ptrace_hbp_get_ctrl(note_type
, tsk
, idx
, &ctrl
);
919 static int compat_ptrace_hbp_set(unsigned int note_type
,
920 struct task_struct
*tsk
,
927 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);
931 err
= ptrace_hbp_set_addr(note_type
, tsk
, idx
, addr
);
934 err
= ptrace_hbp_set_ctrl(note_type
, tsk
, idx
, ctrl
);
940 static int compat_ptrace_gethbpregs(struct task_struct
*tsk
, compat_long_t num
,
941 compat_ulong_t __user
*data
)
945 mm_segment_t old_fs
= get_fs();
950 ret
= compat_ptrace_hbp_get(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
952 } else if (num
== 0) {
953 ret
= compat_ptrace_hbp_get_resource_info(&kdata
);
956 ret
= compat_ptrace_hbp_get(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
961 ret
= put_user(kdata
, data
);
966 static int compat_ptrace_sethbpregs(struct task_struct
*tsk
, compat_long_t num
,
967 compat_ulong_t __user
*data
)
971 mm_segment_t old_fs
= get_fs();
976 ret
= get_user(kdata
, data
);
982 ret
= compat_ptrace_hbp_set(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
984 ret
= compat_ptrace_hbp_set(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
989 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
991 long compat_arch_ptrace(struct task_struct
*child
, compat_long_t request
,
992 compat_ulong_t caddr
, compat_ulong_t cdata
)
994 unsigned long addr
= caddr
;
995 unsigned long data
= cdata
;
996 void __user
*datap
= compat_ptr(data
);
1000 case PTRACE_PEEKUSR
:
1001 ret
= compat_ptrace_read_user(child
, addr
, datap
);
1004 case PTRACE_POKEUSR
:
1005 ret
= compat_ptrace_write_user(child
, addr
, data
);
1008 case COMPAT_PTRACE_GETREGS
:
1009 ret
= copy_regset_to_user(child
,
1012 0, sizeof(compat_elf_gregset_t
),
1016 case COMPAT_PTRACE_SETREGS
:
1017 ret
= copy_regset_from_user(child
,
1020 0, sizeof(compat_elf_gregset_t
),
1024 case COMPAT_PTRACE_GET_THREAD_AREA
:
1025 ret
= put_user((compat_ulong_t
)child
->thread
.tp_value
,
1026 (compat_ulong_t __user
*)datap
);
1029 case COMPAT_PTRACE_SET_SYSCALL
:
1030 task_pt_regs(child
)->syscallno
= data
;
1034 case COMPAT_PTRACE_GETVFPREGS
:
1035 ret
= copy_regset_to_user(child
,
1042 case COMPAT_PTRACE_SETVFPREGS
:
1043 ret
= copy_regset_from_user(child
,
1050 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1051 case COMPAT_PTRACE_GETHBPREGS
:
1052 ret
= compat_ptrace_gethbpregs(child
, addr
, datap
);
1055 case COMPAT_PTRACE_SETHBPREGS
:
1056 ret
= compat_ptrace_sethbpregs(child
, addr
, datap
);
1061 ret
= compat_ptrace_request(child
, request
, addr
,
1068 #endif /* CONFIG_COMPAT */
1070 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
1072 #ifdef CONFIG_COMPAT
1073 if (is_compat_thread(task_thread_info(task
)))
1074 return &user_aarch32_view
;
1076 return &user_aarch64_view
;
1079 long arch_ptrace(struct task_struct
*child
, long request
,
1080 unsigned long addr
, unsigned long data
)
1085 case PTRACE_SET_SYSCALL
:
1086 task_pt_regs(child
)->syscallno
= data
;
1090 ret
= ptrace_request(child
, request
, addr
, data
);
/* Direction marker passed to the tracer via the scratch register. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
1102 static void tracehook_report_syscall(struct pt_regs
*regs
,
1103 enum ptrace_syscall_dir dir
)
1106 unsigned long saved_reg
;
1109 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1110 * used to denote syscall entry/exit:
1112 regno
= (is_compat_task() ? 12 : 7);
1113 saved_reg
= regs
->regs
[regno
];
1114 regs
->regs
[regno
] = dir
;
1116 if (dir
== PTRACE_SYSCALL_EXIT
)
1117 tracehook_report_syscall_exit(regs
, 0);
1118 else if (tracehook_report_syscall_entry(regs
))
1119 regs
->syscallno
= ~0UL;
1121 regs
->regs
[regno
] = saved_reg
;
1124 asmlinkage
int syscall_trace_enter(struct pt_regs
*regs
)
1126 unsigned int saved_syscallno
= regs
->syscallno
;
1128 /* Do the secure computing check first; failures should be fast. */
1129 if (secure_computing(regs
->syscallno
) == -1)
1130 return RET_SKIP_SYSCALL_TRACE
;
1132 if (test_thread_flag(TIF_SYSCALL_TRACE
))
1133 tracehook_report_syscall(regs
, PTRACE_SYSCALL_ENTER
);
1135 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1136 trace_sys_enter(regs
, regs
->syscallno
);
1138 if (IS_SKIP_SYSCALL(regs
->syscallno
)) {
1140 * RESTRICTION: we can't modify a return value of user
1141 * issued syscall(-1) here. In order to ease this flavor,
1142 * we need to treat whatever value in x0 as a return value,
1143 * but this might result in a bogus value being returned.
1146 * NOTE: syscallno may also be set to -1 if fatal signal is
1147 * detected in tracehook_report_syscall_entry(), but since
1148 * a value set to x0 here is not used in this case, we may
1151 if (!test_thread_flag(TIF_SYSCALL_TRACE
) ||
1152 (IS_SKIP_SYSCALL(saved_syscallno
)))
1153 regs
->regs
[0] = -ENOSYS
;
1156 audit_syscall_entry(syscall_get_arch(), regs
->syscallno
,
1157 regs
->orig_x0
, regs
->regs
[1], regs
->regs
[2], regs
->regs
[3]);
1159 return regs
->syscallno
;
1162 asmlinkage
void syscall_trace_exit(struct pt_regs
*regs
)
1164 audit_syscall_exit(regs
);
1166 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1167 trace_sys_exit(regs
, regs_return_value(regs
));
1169 if (test_thread_flag(TIF_SYSCALL_TRACE
))
1170 tracehook_report_syscall(regs
, PTRACE_SYSCALL_EXIT
);