tracehook: exec double-reporting fix
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / exec.c
/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#ifdef __alpha__
/* for /sbin/loader handling in search_binary_handler() */
#include <linux/a.out.h>
#endif

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

int register_binfmt(struct linux_binfmt * fmt)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	list_add(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
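
/*
 * Editor's note: a minimal sketch of how a handler plugs into the list
 * above, assuming a hypothetical out-of-tree module (the names below are
 * illustrative, not an existing format). It shows the
 * register_binfmt()/unregister_binfmt() pairing and the THIS_MODULE
 * reference that try_module_get() and put_binfmt() pin and release while
 * a handler runs.
 */
#include <linux/module.h>
#include <linux/binfmts.h>

static int example_load_binary(struct linux_binprm *bprm,
			       struct pt_regs *regs)
{
	/* not our format: let search_binary_handler() try the next one */
	return -ENOEXEC;
}

static struct linux_binfmt example_format = {
	.module      = THIS_MODULE,
	.load_binary = example_load_binary,
};

static int __init example_init(void)
{
	return register_binfmt(&example_format);
}

static void __exit example_exit(void)
{
	unregister_binfmt(&example_format);
}

module_init(example_init);
module_exit(example_exit);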

/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file *file;
	struct nameidata nd;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		error = path_lookup_open(AT_FDCWD, tmp,
					 LOOKUP_FOLLOW, &nd,
					 FMODE_READ|FMODE_EXEC);
		putname(tmp);
	}
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN);
	if (error)
		goto exit;

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_put(&nd.path);
	goto out;
}
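
/*
 * Editor's note: a standalone userspace sketch (built separately, not part
 * of this file) of the sys_uselib() entry point above. glibc does not
 * always provide a wrapper, so the raw syscall is used; the library path
 * is purely illustrative. Per the checks above, anything that is not a
 * regular, readable and executable file on an exec-capable mount fails
 * before any binfmt handler is consulted.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* hypothetical a.out-style library path */
	if (syscall(SYS_uselib, "/lib/libdemo.so") != 0)
		perror("uselib");	/* typically ENOEXEC, EACCES or EINVAL */
	return 0;
}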

#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
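
/*
 * Editor's note: a standalone userspace sketch of the limit enforced
 * above. Once the argument strings outgrow ARG_MAX, the 1/4-of-
 * RLIMIT_STACK cap makes get_arg_page() fail and userspace sees execve()
 * return E2BIG. Sizes are illustrative: 2000 arguments of 64KB each is
 * far beyond a quarter of the common 8MB stack limit.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 1024;		/* well under MAX_ARG_STRLEN */
	char *big = malloc(len);
	static char *args[2002];
	int i;

	if (!big)
		return 1;
	memset(big, 'x', len - 1);
	big[len - 1] = '\0';

	args[0] = "/bin/true";
	for (i = 1; i <= 2000; i++)	/* ~128MB of argument data */
		args[i] = big;
	args[2001] = NULL;

	execv("/bin/true", args);
	if (errno == E2BIG)
		printf("execv failed with E2BIG, as expected\n");
	else
		perror("execv");
	return 0;
}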

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err = -ENOMEM;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto err;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;

	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	err = insert_vm_struct(mm, vma);
	if (err) {
		up_write(&mm->mmap_sem);
		goto err;
	}

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

	bprm->p = vma->vm_end - sizeof(void *);

	return 0;

err:
	if (vma) {
		bprm->vma = NULL;
		kmem_cache_free(vm_area_cachep, vma);
	}

	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
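
/*
 * Editor's note: a standalone userspace sketch of the offset arithmetic
 * in the inner loop above, with illustrative numbers. It shows why the
 * copy walks backwards in chunks that never straddle a page: each chunk
 * is capped at the distance from the current position back to the start
 * of its page.
 */
#include <stdio.h>

#define PG 4096UL

int main(void)
{
	unsigned long pos = 3 * PG + 100;	/* illustrative end position */
	long len = 5000;			/* illustrative string length */

	while (len > 0) {
		unsigned long offset = pos % PG;
		long chunk;

		if (offset == 0)	/* pos sits on a page boundary */
			offset = PG;
		chunk = (long)offset < len ? (long)offset : len;
		pos -= chunk;
		len -= chunk;
		printf("copy %4ld bytes to page %lu, offset %4lu\n",
		       chunk, pos / PG, pos % PG);
	}
	return 0;
}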

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), because some
		 * architectures have constraints on va-space that make this
		 * illegal (IA64); for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}

#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret) {
			up_write(&mm->mmap_sem);
			return ret;
		}
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
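
/*
 * Editor's note: a standalone userspace sketch for observing the result
 * of the EXSTACK_* handling above. The permission column of the [stack]
 * line in /proc/self/maps shows rw- for a non-executable stack, and rwx
 * when EXSTACK_ENABLE_X (e.g. a binary with an executable PT_GNU_STACK)
 * was in effect.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[stack]"))
			fputs(line, stdout);	/* address range + perms */
	fclose(f);
	return 0;
}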

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct file *file;
	int err;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
				FMODE_READ|FMODE_EXEC);
	if (err)
		goto out;

	err = -EACCES;
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto out_path_put;

	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto out_path_put;

	err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN);
	if (err)
		goto out_path_put;

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	if (IS_ERR(file))
		return file;

	err = deny_write_access(file);
	if (err) {
		fput(file);
		goto out;
	}

	return file;

out_path_put:
	release_open_intent(&nd);
	path_put(&nd.path);
out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);

static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	struct task_struct *leader = NULL;
	int count;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}
	sig->group_exit_task = tsk;
	zap_other_threads(tsk);

	/* Account for the thread group leader hanging around: */
	count = thread_group_leader(tsk) ? 1 : 2;
	sig->notify_count = count;
	while (atomic_read(&sig->count) > count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 * is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		write_unlock_irq(&tasklist_lock);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	exit_itimers(sig);
	flush_itimer_signals();
	if (leader)
		release_task(leader);

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);

	}
	spin_unlock(&files->file_lock);
}
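
/*
 * Editor's note: a standalone userspace sketch of what flush_old_files()
 * does from the caller's point of view: descriptors marked FD_CLOEXEC
 * vanish across execve(). The program re-execs itself and passes the fd
 * number along so the child can probe it with fcntl().
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc > 1) {
		int fd = atoi(argv[1]);

		/* after exec: the FD_CLOEXEC descriptor should be gone */
		if (fcntl(fd, F_GETFD) == -1)
			printf("fd %d was closed on exec, as expected\n", fd);
		else
			printf("fd %d unexpectedly survived exec\n", fd);
		return 0;
	}

	char buf[16];
	int fd = open("/dev/null", O_RDONLY);

	fcntl(fd, F_SETFD, FD_CLOEXEC);		/* mark close-on-exec */
	snprintf(buf, sizeof(buf), "%d", fd);
	execl("/proc/self/exe", argv[0], buf, (char *)NULL);
	perror("execl");			/* only reached on failure */
	return 1;
}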

char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
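
/*
 * Editor's note: a standalone userspace sketch showing the truncation
 * that set_task_comm()'s strlcpy() performs. tsk->comm is 16 bytes
 * including the NUL, which is observable through the prctl() name
 * interface.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char name[17] = { 0 };

	prctl(PR_SET_NAME, "a-very-long-thread-name");
	prctl(PR_GET_NAME, name);
	printf("comm = \"%s\"\n", name);	/* truncated to 15 characters */
	return 0;
}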

int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copy the binary name from after the last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
		suid_keys(current);
		set_dumpable(current->mm, suid_dumpable);
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		set_dumpable(current->mm, suid_dumpable);
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

static int unsafe_exec(struct task_struct *p)
{
	int unsafe = tracehook_unsafe_exec(p);

	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}

void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid) {
		suid_keys(current);
		current->pdeath_signal = 0;
	}
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}
EXPORT_SYMBOL(compute_creds);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
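
/*
 * Editor's note: a standalone userspace sketch of the operation above.
 * The string area is a run of '\0'-separated strings, so dropping
 * argv[0] just means advancing past the first NUL; the page walking in
 * remove_arg_zero() exists only because the kernel sees that area one
 * mapped page at a time.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* "arg0\0arg1\0arg2\0" laid out the way bprm->p points at it */
	char area[] = "arg0\0arg1\0arg2";
	char *p = area;

	p += strlen(p) + 1;			/* chop off argv[0] */
	printf("new argv[0] = \"%s\"\n", p);	/* prints: arg1 */
	return 0;
}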

/*
 * cycle through the list of binary format handlers until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = bprm->vma->vm_end - sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->taso = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);

void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	kfree(bprm);
}

/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals(current);
		free_bprm(bprm);
		if (displaced)
			put_files_struct(displaced);
		return retval;
	}

out:
	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
out_kfree:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
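
/*
 * Editor's note: a standalone userspace sketch of the familiar caller's
 * view of the do_execve() path above. On success execve() does not
 * return; the argument and environment arrays are exactly what count()
 * and copy_strings() walk.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "/bin/echo", "hello from execve", NULL };
	char *envp[] = { "GREETING=demo", NULL };

	execve("/bin/echo", argv, envp);
	perror("execve");		/* reached only if the exec failed */
	return 1;
}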

int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, long signr)
{
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
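
/*
 * Editor's note: a standalone userspace sketch of the %-expansion that
 * format_corename() performs, covering only %p, %e and %% (the kernel
 * handles more, as shown above). The pattern is illustrative; the real
 * template is read from /proc/sys/kernel/core_pattern, and "mydemo"
 * stands in for current->comm.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *pat = "core.%e.%p";		/* illustrative pattern */
	char out[128];
	char *o = out;
	char *const end = out + sizeof(out) - 1;

	while (*pat && o < end) {
		if (*pat != '%') {
			*o++ = *pat++;
			continue;
		}
		switch (*++pat) {
		case '\0':			/* pattern ends with '%' */
			goto done;
		case 'p':
			o += snprintf(o, end - o, "%ld", (long)getpid());
			break;
		case 'e':
			o += snprintf(o, end - o, "%s", "mydemo");
			break;
		case '%':
			*o++ = '%';
			break;
		default:			/* unknown specifier: drop it */
			break;
		}
		if (o > end)			/* snprintf reported truncation */
			o = end;
		pat++;
	}
done:
	*o = '\0';
	printf("core file name: %s\n", out);	/* e.g. core.mydemo.1234 */
	return 0;
}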

static int zap_process(struct task_struct *start)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_code = exit_code;
		nr = zap_process(tsk);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;
	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically, so get_dumpable can observe the
 * intermediate state. To avoid unexpected behavior, get_dumpable always
 * returns either the old dumpable value or the new one by paying attention
 * to the order in which the bits are modified.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  |   00      01      01
 *  0    2  |   00      10(*)   11
 *  1    0  |   01      00      00
 *  1    2  |   01      11      11
 *  2    0  |   11      10(*)   00
 *  2    1  |   11      11      01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
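
/*
 * Editor's note: a standalone userspace sketch of the dumpable setting
 * managed above, via its prctl() interface. Clearing the flag is how
 * daemons that handle secrets opt out of coredumps; the suid-safe
 * value 2 comes from the suid_dumpable sysctl rather than from here.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	prctl(PR_SET_DUMPABLE, 0);	/* opt out of coredumps */
	printf("dumpable now %d\n", prctl(PR_GET_DUMPABLE));
	prctl(PR_SET_DUMPABLE, 1);	/* restore the default */
	printf("dumpable now %d\n", prctl(PR_GET_DUMPABLE));
	return 0;
}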

int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}

int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;
	int fsuid = current->fsuid;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	char *delimit;

	audit_core_dumps(signr);

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	/*
	 * If another thread got here first, or we are not dumpable, bail out.
	 */
	if (mm->core_state || !get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		current->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0)
		goto fail;

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, signr);
	unlock_kernel();
	/*
	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
	 * to a pipe. Since we're not writing directly to the filesystem
	 * RLIMIT_CORE doesn't really apply, as no actual core file will be
	 * created unless the pipe reader chooses to write out the core file,
	 * at which point file size limits and permissions will be imposed
	 * as they are with any other process.
	 */
	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;

	if (ispipe) {
		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		if (!helper_argv) {
			/* argv_split() can fail under memory pressure */
			goto fail_unlock;
		}
		/* Terminate the string before the first option */
		delimit = strchr(corename, ' ');
		if (delimit)
			*delimit = '\0';
		delimit = strrchr(helper_argv[0], '/');
		if (delimit)
			delimit++;
		else
			delimit = helper_argv[0];
		if (!strcmp(delimit, current->comm)) {
			printk(KERN_NOTICE "Recursive core dump detected, "
					"aborting\n");
			goto fail_unlock;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually I see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	/*
	 * Don't allow local users to get cute and trick others into
	 * dumping core into their pre-created files:
	 */
	if (inode->i_uid != current->fsuid)
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file, core_limit);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	if (helper_argv)
		argv_free(helper_argv);

	current->fsuid = fsuid;
	coredump_finish(mm);
fail:
	return retval;
}