android_kernel_alcatel_ttab (mt8127): arch/arm64/kernel/ptrace.c
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
                struct perf_sample_data *data,
                struct pt_regs *regs)
{
        struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
        siginfo_t info = {
                .si_signo = SIGTRAP,
                .si_errno = 0,
                .si_code = TRAP_HWBKPT,
                .si_addr = (void __user *)(bkpt->trigger),
        };

#ifdef CONFIG_COMPAT
        int i;

        if (!is_compat_task())
                goto send_sig;

        for (i = 0; i < ARM_MAX_BRP; ++i) {
                if (current->thread.debug.hbp_break[i] == bp) {
                        info.si_errno = (i << 1) + 1;
                        break;
                }
        }
        for (i = 0; i < ARM_MAX_WRP; ++i) {
                if (current->thread.debug.hbp_watch[i] == bp) {
                        info.si_errno = -((i << 1) + 1);
                        break;
                }
        }

send_sig:
#endif
        force_sig_info(SIGTRAP, &info, current);
}
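
/*
 * Note on the encoding above: for a 32-bit (compat) tracee, si_errno tells
 * the debugger which debug slot fired.  Breakpoint slot i is reported as the
 * positive value (i << 1) + 1, watchpoint slot i as the negative value
 * -((i << 1) + 1); si_errno == 0 means the slot could not be determined.
 * After PTRACE_GETSIGINFO, a compat debugger can recover the slot with
 * something like (a sketch, not part of this file):
 *
 *      int n = info.si_errno;
 *      bool is_watchpoint = n < 0;
 *      int slot = (abs(n) - 1) >> 1;
 */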

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < ARM_MAX_BRP; i++) {
                if (t->debug.hbp_break[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_break[i]);
                        t->debug.hbp_break[i] = NULL;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; i++) {
                if (t->debug.hbp_watch[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_watch[i]);
                        t->debug.hbp_watch[i] = NULL;
                }
        }
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx)
{
        struct perf_event *bp = ERR_PTR(-EINVAL);

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP)
                        bp = tsk->thread.debug.hbp_break[idx];
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP)
                        bp = tsk->thread.debug.hbp_watch[idx];
                break;
        }

        return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx,
                struct perf_event *bp)
{
        int err = -EINVAL;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP) {
                        tsk->thread.debug.hbp_break[idx] = bp;
                        err = 0;
                }
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP) {
                        tsk->thread.debug.hbp_watch[idx] = bp;
                        err = 0;
                }
                break;
        }

        return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx)
{
        struct perf_event *bp;
        struct perf_event_attr attr;
        int err, type;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                type = HW_BREAKPOINT_X;
                break;
        case NT_ARM_HW_WATCH:
                type = HW_BREAKPOINT_RW;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        ptrace_breakpoint_init(&attr);

        /*
         * Initialise fields to sane defaults
         * (i.e. values that will pass validation).
         */
        attr.bp_addr = 0;
        attr.bp_len = HW_BREAKPOINT_LEN_4;
        attr.bp_type = type;
        attr.disabled = 1;

        bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
        if (IS_ERR(bp))
                return bp;

        err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
        if (err)
                return ERR_PTR(err);

        return bp;
}
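
/*
 * The perf events backing ptrace breakpoints are created lazily: a slot is
 * only registered (via ptrace_hbp_create() above) the first time the tracer
 * touches it, and it starts out disabled with placeholder address/length
 * values.  It only becomes live once the tracer writes a control word with
 * the enable bit set, which goes through modify_user_hw_breakpoint().
 */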

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
                struct arch_hw_breakpoint_ctrl ctrl,
                struct perf_event_attr *attr)
{
        int err, len, type, disabled = !ctrl.enabled;

        attr->disabled = disabled;
        if (disabled)
                return 0;

        err = arch_bp_generic_fields(ctrl, &len, &type);
        if (err)
                return err;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
                break;
        case NT_ARM_HW_WATCH:
                if ((type & HW_BREAKPOINT_RW) != type)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        attr->bp_len = len;
        attr->bp_type = type;

        return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
        u8 num;
        u32 reg = 0;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                num = hw_breakpoint_slots(TYPE_INST);
                break;
        case NT_ARM_HW_WATCH:
                num = hw_breakpoint_slots(TYPE_DATA);
                break;
        default:
                return -EINVAL;
        }

        reg |= debug_monitors_arch();
        reg <<= 8;
        reg |= num;

        *info = reg;
        return 0;
}
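
/*
 * The resource word built above is what shows up as the dbg_info field of
 * struct user_hwdebug_state.  As constructed here, its layout is:
 *
 *      bits [15:8]   debug architecture version (debug_monitors_arch())
 *      bits [7:0]    number of available breakpoint or watchpoint slots
 *
 * For example, a part with debug architecture 0x6 and 4 watchpoints would
 * report 0x0604 for the NT_ARM_HW_WATCH regset.
 */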

static int ptrace_hbp_get_ctrl(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx,
                u32 *ctrl)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
        return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx,
                u64 *addr)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *addr = bp ? bp->attr.bp_addr : 0;
        return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (!bp)
                bp = ptrace_hbp_create(note_type, tsk, idx);

        return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx,
                u32 uctrl)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;
        struct arch_hw_breakpoint_ctrl ctrl;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        decode_ctrl_reg(uctrl, &ctrl);
        err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
        if (err)
                return err;

        return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
                struct task_struct *tsk,
                unsigned long idx,
                u64 addr)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        attr.bp_addr = addr;
        err = modify_user_hw_breakpoint(bp, &attr);
        return err;
}

#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
#define PTRACE_HBP_PAD_SZ sizeof(u32)
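
/*
 * hw_break_get()/hw_break_set() below expose the hardware debug registers as
 * a regset shaped like struct user_hwdebug_state from the UAPI headers,
 * which (roughly, see asm/ptrace.h for the authoritative definition) is:
 *
 *      struct user_hwdebug_state {
 *              __u32 dbg_info;
 *              __u32 pad;
 *              struct {
 *                      __u64 addr;
 *                      __u32 ctrl;
 *                      __u32 pad;
 *              } dbg_regs[16];
 *      };
 *
 * The PTRACE_HBP_*_SZ sizes above are the field widths used to walk this
 * layout one (addr, ctrl, pad) triple at a time.
 */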

static int hw_break_get(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 info, ctrl;
        u64 addr;

        /* Resource info */
        ret = ptrace_hbp_get_resource_info(note_type, &info);
        if (ret)
                return ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
                        sizeof(info));
        if (ret)
                return ret;

        /* Pad */
        offset = offsetof(struct user_hwdebug_state, pad);
        ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
                        offset + PTRACE_HBP_PAD_SZ);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
                                offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
                                offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                offset, offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}

static int hw_break_set(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 ctrl;
        u64 addr;

        /* Resource info and pad */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                offset, offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}
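
/*
 * Example (userspace side, a sketch only and not part of this file): a
 * debugger would typically program one of these slots through the generic
 * PTRACE_GETREGSET/PTRACE_SETREGSET requests with the NT_ARM_HW_BREAK or
 * NT_ARM_HW_WATCH note type, e.g.:
 *
 *      struct user_hwdebug_state dbg;
 *      struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
 *
 *      ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 *      dbg.dbg_regs[0].addr = breakpoint_address;
 *      dbg.dbg_regs[0].ctrl = control_word;    // enable bit, length, ...
 *      ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 *
 * breakpoint_address and control_word are placeholders here; the control
 * word encoding is the one decoded by decode_ctrl_reg() above.
 */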
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_pt_regs newregs;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
                return ret;

        if (!valid_user_regs(&newregs))
                return -EINVAL;

        task_pt_regs(target)->user_regs = newregs;
        return 0;
}
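
/*
 * Writes to the GP regset are vetted by valid_user_regs() before being
 * committed: register images that do not describe a legal user-mode context
 * are rejected with -EINVAL rather than being copied into the traced task's
 * pt_regs.
 */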

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        uregs = &target->thread.fpsimd_state.user_fpsimd;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_fpsimd_state newstate;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
                return ret;

        target->thread.fpsimd_state.user_fpsimd = newstate;
        fpsimd_flush_task_state(target);
        return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        unsigned long *tls = &target->thread.tp_value;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;
        unsigned long tls;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
                return ret;

        target->thread.tp_value = tls;
        return ret;
}

enum aarch64_regset {
        REGSET_GPR,
        REGSET_FPR,
        REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        REGSET_HW_BREAK,
        REGSET_HW_WATCH,
#endif
};

static const struct user_regset aarch64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_pt_regs) / sizeof(u64),
                .size = sizeof(u64),
                .align = sizeof(u64),
                .get = gpr_get,
                .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
                /*
                 * We pretend we have 32-bit registers because the fpsr and
                 * fpcr are 32-bits wide.
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = fpr_get,
                .set = fpr_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(void *),
                .align = sizeof(void *),
                .get = tls_get,
                .set = tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
#endif
};

static const struct user_regset_view user_aarch64_view = {
        .name = "aarch64", .e_machine = EM_AARCH64,
        .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
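
/*
 * These regsets are what the generic ptrace core exposes through
 * PTRACE_GETREGSET/PTRACE_SETREGSET and what ends up in ELF core dump notes.
 * As a sketch (userspace, not part of this file), reading the general
 * purpose registers of a 64-bit tracee looks like:
 *
 *      struct user_pt_regs regs;
 *      struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *      if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *              printf("pc = %llx, sp = %llx\n", regs.pc, regs.sp);
 */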

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
        REGSET_COMPAT_GPR,
        REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                void *reg;

                switch (idx) {
                case 15:
                        reg = (void *)&task_pt_regs(target)->pc;
                        break;
                case 16:
                        reg = (void *)&task_pt_regs(target)->pstate;
                        break;
                case 17:
                        reg = (void *)&task_pt_regs(target)->orig_x0;
                        break;
                default:
                        reg = (void *)&task_pt_regs(target)->regs[idx];
                }

                if (!ubuf && kbuf) {
                        if (i == 0 && NULL != target && target->pid == current->pid)
                                printk(KERN_WARNING "coredump(%d) copy registers to kbuf\n",
                                                current->pid);
                        memcpy(kbuf, reg, sizeof(compat_ulong_t));
                        kbuf += sizeof(compat_ulong_t);
                } else {
                        ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
                        if (ret)
                                break;
                        else
                                ubuf += sizeof(compat_ulong_t);
                }
        }

        return ret;
}
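
/*
 * Mapping used by the two compat GPR accessors: AArch32 r0-r14 live in the
 * AArch64 pt_regs x0-x14 slots, while the three "virtual" indices at the end
 * of the compat regset are redirected as follows:
 *
 *      index 15 -> pc        (AArch32 r15)
 *      index 16 -> pstate    (AArch32 CPSR)
 *      index 17 -> orig_x0   (AArch32 ORIG_r0)
 */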

static int compat_gpr_set(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        struct pt_regs newregs;
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        newregs = *task_pt_regs(target);

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                void *reg;

                switch (idx) {
                case 15:
                        reg = (void *)&newregs.pc;
                        break;
                case 16:
                        reg = (void *)&newregs.pstate;
                        break;
                case 17:
                        reg = (void *)&newregs.orig_x0;
                        break;
                default:
                        reg = (void *)&newregs.regs[idx];
                }

                ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
                if (ret)
                        goto out;
                else
                        ubuf += sizeof(compat_ulong_t);
        }

        if (valid_user_regs(&newregs.user_regs))
                *task_pt_regs(target) = newregs;
        else
                ret = -EINVAL;

out:
        return ret;
}

static int compat_vfp_get(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        /*
         * The VFP registers are packed into the fpsimd_state, so they all sit
         * nicely together for us. We just need to create the fpscr separately.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
                        VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
                        (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
                ret = put_user(fpscr, (compat_ulong_t *)ubuf);
        }

        return ret;
}

static int compat_vfp_set(struct task_struct *target,
                const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        if (pos + count > VFP_STATE_SIZE)
                return -EIO;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
                        VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                ret = get_user(fpscr, (compat_ulong_t *)ubuf);
                uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
                uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
        }

        fpsimd_flush_task_state(target);
        return ret;
}
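
/*
 * AArch32 exposes a single FPSCR register, but AArch64 splits that state
 * across FPSR (cumulative status flags) and FPCR (control bits).  The two
 * helpers above therefore reassemble FPSCR on reads by OR-ing the masked
 * halves together, and scatter it back into fpsr/fpcr on writes using
 * VFP_FPSCR_STAT_MASK and VFP_FPSCR_CTRL_MASK.
 */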

static const struct user_regset aarch32_regsets[] = {
        [REGSET_COMPAT_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_COMPAT_VFP] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
};

static const struct user_regset_view user_aarch32_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
                compat_ulong_t __user *ret)
{
        compat_ulong_t tmp;

        if (off & 3)
                return -EIO;

        if (off == COMPAT_PT_TEXT_ADDR)
                tmp = tsk->mm->start_code;
        else if (off == COMPAT_PT_DATA_ADDR)
                tmp = tsk->mm->start_data;
        else if (off == COMPAT_PT_TEXT_END_ADDR)
                tmp = tsk->mm->end_code;
        else if (off < sizeof(compat_elf_gregset_t))
                return copy_regset_to_user(tsk, &user_aarch32_view,
                                REGSET_COMPAT_GPR, off,
                                sizeof(compat_ulong_t), ret);
        else if (off >= COMPAT_USER_SZ)
                return -EIO;
        else
                tmp = 0;

        return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                compat_ulong_t val)
{
        int ret;
        mm_segment_t old_fs = get_fs();

        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;

        if (off >= sizeof(compat_elf_gregset_t))
                return 0;

        set_fs(KERNEL_DS);
        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                        REGSET_COMPAT_GPR, off,
                        sizeof(compat_ulong_t),
                        &val);
        set_fs(old_fs);

        return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
        return (abs(num) - 1) >> 1;
}
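
/*
 * For example, with this numbering the first breakpoint slot is reached via
 * num = 1 (address) and num = 2 (control), the second via num = 3 and 4, and
 * the first watchpoint slot via num = -1 (address) and num = -2 (control).
 * The shift above maps all of these onto slot indices 0 and 1, with the sign
 * and the low bit telling compat_ptrace_hbp_get/set() what is being accessed.
 */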

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
        u8 num_brps, num_wrps, debug_arch, wp_len;
        u32 reg = 0;

        num_brps = hw_breakpoint_slots(TYPE_INST);
        num_wrps = hw_breakpoint_slots(TYPE_DATA);

        debug_arch = debug_monitors_arch();
        wp_len = 8;
        reg |= debug_arch;
        reg <<= 8;
        reg |= wp_len;
        reg <<= 8;
        reg |= num_wrps;
        reg <<= 8;
        reg |= num_brps;

        *kdata = reg;
        return 0;
}
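
/*
 * The 32-bit resource word returned for "register 0" is therefore packed as:
 *
 *      bits [31:24]  debug architecture version
 *      bits [23:16]  maximum watchpoint length in bytes (fixed at 8 here)
 *      bits [15:8]   number of watchpoint slots
 *      bits [7:0]    number of breakpoint slots
 *
 * which mirrors what the native AArch32 PTRACE_GETHBPREGS interface reports.
 */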

static int compat_ptrace_hbp_get(unsigned int note_type,
                struct task_struct *tsk,
                compat_long_t num,
                u32 *kdata)
{
        u64 addr = 0;
        u32 ctrl = 0;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
                *kdata = (u32)addr;
        } else {
                err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
                *kdata = ctrl;
        }

        return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
                struct task_struct *tsk,
                compat_long_t num,
                u32 *kdata)
{
        u64 addr;
        u32 ctrl;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                addr = *kdata;
                err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
        } else {
                ctrl = *kdata;
                err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
        }

        return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
                compat_ulong_t __user *data)
{
        int ret;
        u32 kdata;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        /* Watchpoint */
        if (num < 0) {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
        /* Resource info */
        } else if (num == 0) {
                ret = compat_ptrace_hbp_get_resource_info(&kdata);
        /* Breakpoint */
        } else {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
        }
        set_fs(old_fs);

        if (!ret)
                ret = put_user(kdata, data);

        return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
                compat_ulong_t __user *data)
{
        int ret;
        u32 kdata = 0;
        mm_segment_t old_fs = get_fs();

        if (num == 0)
                return 0;

        ret = get_user(kdata, data);
        if (ret)
                return ret;

        set_fs(KERNEL_DS);
        if (num < 0)
                ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
        else
                ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
        set_fs(old_fs);

        return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = compat_ptrace_read_user(child, addr, datap);
                break;

        case PTRACE_POKEUSR:
                ret = compat_ptrace_write_user(child, addr, data);
                break;

        case COMPAT_PTRACE_GETREGS:
                ret = copy_regset_to_user(child,
                                &user_aarch32_view,
                                REGSET_COMPAT_GPR,
                                0, sizeof(compat_elf_gregset_t),
                                datap);
                break;

        case COMPAT_PTRACE_SETREGS:
                ret = copy_regset_from_user(child,
                                &user_aarch32_view,
                                REGSET_COMPAT_GPR,
                                0, sizeof(compat_elf_gregset_t),
                                datap);
                break;

        case COMPAT_PTRACE_GET_THREAD_AREA:
                ret = put_user((compat_ulong_t)child->thread.tp_value,
                                (compat_ulong_t __user *)datap);
                break;

        case COMPAT_PTRACE_SET_SYSCALL:
                task_pt_regs(child)->syscallno = data;
                ret = 0;
                break;

        case COMPAT_PTRACE_GETVFPREGS:
                ret = copy_regset_to_user(child,
                                &user_aarch32_view,
                                REGSET_COMPAT_VFP,
                                0, VFP_STATE_SIZE,
                                datap);
                break;

        case COMPAT_PTRACE_SETVFPREGS:
                ret = copy_regset_from_user(child,
                                &user_aarch32_view,
                                REGSET_COMPAT_VFP,
                                0, VFP_STATE_SIZE,
                                datap);
                break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        case COMPAT_PTRACE_GETHBPREGS:
                ret = compat_ptrace_gethbpregs(child, addr, datap);
                break;

        case COMPAT_PTRACE_SETHBPREGS:
                ret = compat_ptrace_sethbpregs(child, addr, datap);
                break;
#endif

        default:
                ret = compat_ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        if (is_compat_thread(task_thread_info(task)))
                return &user_aarch32_view;
#endif
        return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
                unsigned long addr, unsigned long data)
{
        int ret;

        switch (request) {
        case PTRACE_SET_SYSCALL:
                task_pt_regs(child)->syscallno = data;
                ret = 0;
                break;
        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}

enum ptrace_syscall_dir {
        PTRACE_SYSCALL_ENTER = 0,
        PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
                enum ptrace_syscall_dir dir)
{
        int regno;
        unsigned long saved_reg;

        /*
         * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
         * used to denote syscall entry/exit:
         */
        regno = (is_compat_task() ? 12 : 7);
        saved_reg = regs->regs[regno];
        regs->regs[regno] = dir;

        if (dir == PTRACE_SYSCALL_EXIT)
                tracehook_report_syscall_exit(regs, 0);
        else if (tracehook_report_syscall_entry(regs))
                regs->syscallno = ~0UL;

        regs->regs[regno] = saved_reg;
}
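
/*
 * From the tracer's point of view this means that, while the tracee is
 * stopped in a syscall-trace stop, x7 (or r12 for a compat tracee) reads as
 * 0 on entry to the syscall and 1 on exit, and the original value of the
 * register is restored before the tracee resumes.  A debugger inspecting
 * registers at the stop can use that to tell the two stops apart.
 */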

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
        unsigned int saved_syscallno = regs->syscallno;

        /* Do the secure computing check first; failures should be fast. */
        if (secure_computing(regs->syscallno) == -1)
                return RET_SKIP_SYSCALL_TRACE;

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);

        if (IS_SKIP_SYSCALL(regs->syscallno)) {
                /*
                 * RESTRICTION: we can't modify the return value of a
                 * user-issued syscall(-1) here, so we have to treat whatever
                 * value is already in x0 as the return value, which might be
                 * bogus.
                 */
                /*
                 * NOTE: syscallno may also be set to -1 if a fatal signal is
                 * detected in tracehook_report_syscall_entry(), but since
                 * the value written to x0 is not used in that case, we can
                 * ignore it.
                 */
                if (!test_thread_flag(TIF_SYSCALL_TRACE) ||
                                (IS_SKIP_SYSCALL(saved_syscallno)))
                        regs->regs[0] = -ENOSYS;
        }

        audit_syscall_entry(syscall_get_arch(), regs->syscallno,
                        regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);

        return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
        audit_syscall_exit(regs);

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}