CRED: Inaugurate COW credentials
mt8127/android_kernel_alcatel_ttab: kernel/ptrace.c
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			__set_task_state(child, TASK_STOPPED);
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_may_access(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return (!err ? true : false);
}
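
/*
 * Illustrative sketch, not part of this file: how a hypothetical in-kernel
 * caller might gate read access to another task, in the style of the /proc
 * checks mentioned in the comment above.  The function name
 * example_may_read_task() is made up for illustration; PTRACE_MODE_READ is
 * the weaker mode used for /proc-style reads, PTRACE_MODE_ATTACH the one
 * used for a real attach.
 *
 *	static int example_may_read_task(struct task_struct *task)
 *	{
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *			return -EACCES;
 *		return 0;
 *	}
 */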

int ptrace_attach(struct task_struct *task)
{
	int retval;
	unsigned long flags;

	audit_ptrace(task);

	retval = -EPERM;
	if (same_thread_group(task, current))
		goto out;

	/* Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under ptrace.
	 */
	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
	if (retval < 0)
		goto out;

	retval = -EPERM;
repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	if (!task->mm)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
	write_unlock_irqrestore(&tasklist_lock, flags);
	task_unlock(task);
	mutex_unlock(&current->cred_exec_mutex);
out:
	return retval;
}

static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
	child->exit_code = data;
	/* .. re-parent .. */
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/* protect against de_thread()->release_task() */
	if (child->ptrace)
		__ptrace_detach(child, data);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
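
/*
 * Illustrative user-space sketch, not part of this file: requesting the
 * option bits that ptrace_setoptions() translates into PT_* flags above.
 * Assumes a tracer that has already attached to a stopped tracee whose id
 * is in `pid`.
 *
 *	#include <sys/ptrace.h>
 *	#include <stdio.h>
 *
 *	long opts = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
 *		    PTRACE_O_TRACEEXIT;
 *	if (ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts) == -1)
 *		perror("PTRACE_SETOPTIONS");
 */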

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}
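
/*
 * Illustrative user-space sketch, not part of this file: reading the siginfo
 * of the signal that stopped the tracee, which is what ptrace_getsiginfo()
 * serves (PTRACE_SETSIGINFO works the same way in the other direction).
 * Assumes an attached, currently stopped tracee whose id is in `pid`.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	siginfo_t si;
 *	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0)
 *		printf("stopped by signal %d (si_code %d)\n",
 *		       si.si_signo, si.si_code);
 */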

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request, long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else
		user_disable_single_step(child);

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
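
/*
 * Illustrative user-space sketch, not part of this file: a minimal syscall
 * tracing loop driven by PTRACE_SYSCALL, which ptrace_resume() handles above.
 * The last ptrace() argument is the signal to deliver on resume (none here),
 * i.e. the value stored into child->exit_code.  Assumes an attached, stopped
 * tracee whose id is in `pid`.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
 *			break;
 *		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *			break;
 *	}
 */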

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;
	siginfo_t siginfo;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user((siginfo_t __user *) data,
						   &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, (siginfo_t __user *) data,
				   sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

	default:
		break;
	}

	return ret;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	/*
	 * Are we already being traced?
	 */
repeat:
	task_lock(current);
	if (!(current->ptrace & PT_PTRACED)) {
		/*
		 * See ptrace_attach() comments about the locking here.
		 */
		unsigned long flags;
		if (!write_trylock_irqsave(&tasklist_lock, flags)) {
			task_unlock(current);
			do {
				cpu_relax();
			} while (!write_can_lock(&tasklist_lock));
			goto repeat;
		}

		ret = security_ptrace_traceme(current->parent);

		/*
		 * Set the ptrace bit in the process ptrace flags.
		 * Then link us on our parent's ptraced list.
		 */
		if (!ret) {
			current->ptrace |= PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}

		write_unlock_irqrestore(&tasklist_lock, flags);
	}
	task_unlock(current);
	return ret;
}
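
/*
 * Illustrative user-space sketch, not part of this file: the canonical
 * PTRACE_TRACEME pattern that ptrace_traceme() backs.  The child volunteers
 * to be traced by its parent and then execs; the exec delivers a SIGTRAP
 * stop, so the parent regains control before the new program runs.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execlp("ls", "ls", (char *)NULL);
 *		_exit(127);
 *	}
 *	waitpid(pid, NULL, 0);
 *	ptrace(PTRACE_CONT, pid, NULL, NULL);
 */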

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	read_lock(&tasklist_lock);
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);

	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
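
/*
 * Illustrative user-space sketch, not part of this file: the word-sized
 * peek/poke requests served by generic_ptrace_peekdata()/_pokedata().  At
 * the raw syscall level the peeked word is stored through `data` (the
 * put_user() above); the glibc wrapper hides that and returns the word
 * directly, so errno must be cleared first to tell a legitimate -1 word
 * from an error.  Assumes an attached, stopped tracee `pid` and a valid
 * tracee address `addr`.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 *	else
 *		ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)word);
 */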

#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif	/* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */