/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include "internal.h"

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

int __register_binfmt(struct linux_binfmt *fmt, int insert)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt *fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
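
/*
 * Illustrative sketch (not part of this file): a binary-format module
 * typically registers itself at init time and unregisters on exit,
 * usually via the register_binfmt() wrapper that <linux/binfmts.h> is
 * assumed to provide around __register_binfmt(fmt, 0):
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = example_load_binary,	(hypothetical loader)
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 */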

static inline void put_binfmt(struct linux_binfmt *fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable
 * for security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp,
				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
				MAY_READ | MAY_EXEC | MAY_OPEN);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file->f_path.dentry);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt *fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}

#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user *argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user *p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}
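
/*
 * Illustrative note: for a call like execve("prog", {"prog", "-v", NULL},
 * envp), count() walks the user-space array until the NULL sentinel and
 * returns 2. A missing sentinel is bounded by 'max' (MAX_ARG_STRINGS in
 * the callers below) and reported as -E2BIG; an unreadable pointer
 * yields -EFAULT.
 */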

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user *argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
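
/*
 * Worked example of the copy loop above (illustrative only): with
 * PAGE_SIZE == 4096, bprm->p == 0x2004 and a 16-byte string, the first
 * inner iteration has pos % PAGE_SIZE == 4, so 4 bytes are copied to
 * offset 0 of the page covering 0x2000; the remaining 12 bytes land at
 * offsets 4084..4095 of the preceding page. Strings are thus written
 * back to front, one page crossing at a time, with bprm->p left
 * pointing at the lowest byte written.
 */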

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), because some
		 * architectures have constraints on va-space that make this
		 * illegal (IA64); for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
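
/*
 * Worked example of the shift above (illustrative only): if the temporary
 * stack vma is [STACK_TOP_MAX - 4096, STACK_TOP_MAX) and shift is 0x10000,
 * the vma is first grown to cover [STACK_TOP_MAX - 4096 - 0x10000,
 * STACK_TOP_MAX), the page tables for the single stack page are moved down
 * by 0x10000, the page tables covering the now-unused old range are freed,
 * and the vma is finally shrunk to the new 4096-byte window.
 */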

#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct file *file;
	int err;

	file = do_filp_open(AT_FDCWD, name,
				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
				MAY_EXEC | MAY_OPEN);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file->f_path.dentry);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
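
/*
 * Illustrative sketch (not part of this file): binfmt handlers use
 * kernel_read() to pull file contents into a kernel buffer, e.g. to
 * fetch a header at a known offset:
 *
 *	char hdr[128];
 *	int rc = kernel_read(file, 0, hdr, sizeof(hdr));
 *	if (rc != sizeof(hdr))
 *		return rc < 0 ? rc : -EIO;
 */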

static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct *old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}
	sig->group_exit_task = tsk;
	zap_other_threads(tsk);

	/* Account for the thread group leader hanging around: */
	count = thread_group_leader(tsk) ? 1 : 2;
	sig->notify_count = count;
	while (atomic_read(&sig->count) > count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	if (current->mm)
		setmax_mm_hiwater_rss(&sig->maxrss, current->mm);

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with tasks created with
		 * CLONE_SIGHAND but not CLONE_THREAD; switch to a new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static void flush_old_files(struct files_struct *files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}
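
/*
 * Illustrative sketch (not part of this file): since ->comm is a fixed
 * TASK_COMM_LEN buffer, callers snapshot it with get_task_comm() rather
 * than reading tsk->comm directly:
 *
 *	char comm[TASK_COMM_LEN];
 *	printk(KERN_INFO "exec by %s\n", get_task_comm(comm, current));
 */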

int flush_old_exec(struct linux_binprm *bprm)
{
	char *name;
	int i, ch, retval;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copy the binary name from after the last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
		set_dumpable(current->mm, suid_dumpable);
	}

	current->personality &= ~bprm->per_clear;

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before that, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->cred_guard_mutex);
	return -ENOMEM;
}

void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold current->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	bprm->unsafe = tracehook_unsafe_exec(p);

	n_fs = 1;
	write_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	write_unlock(&p->fs->lock);

	return res;
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode *inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
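
/*
 * Illustrative sketch (not part of this file): script loaders such as
 * binfmt_script use remove_arg_zero() to drop the original argv[0] and
 * then push the interpreter name in its place, roughly:
 *
 *	retval = remove_arg_zero(bprm);
 *	if (retval)
 *		return retval;
 *	retval = copy_strings_kernel(1, &i_name, bprm);	(i_name: hypothetical)
 */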

/*
 * cycle through the list of binary format handlers until one of them
 * recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);

/*
 * sys_execve() executes a new program.
 */
int do_execve(char *filename,
	char __user * __user *argv,
	char __user * __user *envp,
	struct pt_regs *regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	current->stack_start = current->mm->start_stack;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
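
/*
 * Illustrative sketch (not part of this file): each architecture provides
 * a thin sys_execve() entry point that copies the filename in from user
 * space and forwards to do_execve(); an x86-style wrapper looks roughly
 * like:
 *
 *	long sys_execve(char __user *name, char __user * __user *argv,
 *			char __user * __user *envp, struct pt_regs *regs)
 *	{
 *		long error;
 *		char *filename = getname(name);
 *
 *		error = PTR_ERR(filename);
 *		if (IS_ERR(filename))
 *			return error;
 *		error = do_execve(filename, argv, envp, regs);
 *		putname(filename);
 *		return error;
 *	}
 */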

void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
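
/*
 * Worked example (illustrative only): with core_pattern set to
 * "/tmp/core.%e.%p", a task named "bash" with tgid 1234 yields
 * "/tmp/core.bash.1234" and a return value of 0. A pattern starting
 * with '|', e.g. "|/usr/bin/helper %p", returns 1 so the caller pipes
 * the dump to that helper instead of writing a file.
 */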

static int zap_process(struct task_struct *start)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_code = exit_code;
		nr = zap_process(tsk);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;
	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

/*
 * set_dumpable converts the traditional three-value dumpable setting to two
 * flags and stores them into mm->flags. It modifies the lower two bits of
 * mm->flags, but these bits are not changed atomically, so get_dumpable can
 * observe an intermediate state. To keep the behavior well defined,
 * get_dumpable returns either the old dumpable value or the new one; this
 * relies on the order in which the bits are modified.
 *
 * dumpable |   mm->flags (binary)
 *  old new | initial interim  final
 * ---------+-----------------------
 *  0    1  |  00      01       01
 *  0    2  |  00      10(*)    11
 *  1    0  |  01      00       00
 *  1    2  |  01      11       11
 *  2    0  |  11      10(*)    00
 *  2    1  |  11      11       01
 *
 * (*) get_dumpable regards the interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}

int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}
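
/*
 * Worked example (illustrative only) of the ordering above: moving from
 * dumpable 2 (flags 11) to 0, set_dumpable() first clears MMF_DUMPABLE,
 * leaving the interim value 10, which get_dumpable() still reports as 2,
 * and only then clears MMF_DUMP_SECURELY. A concurrent reader therefore
 * observes 2 and then 0, never a spurious 1.
 */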

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	struct inode *inode;
	struct file *file;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	int dump_count = 0;
	static atomic_t core_dump_count = ATOMIC_INIT(0);

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;

	cred = prepare_creds();
	if (!cred) {
		retval = -ENOMEM;
		goto fail;
	}

	down_write(&mm->mmap_sem);
	/*
	 * If another thread got here first, or we are not dumpable, bail out.
	 */
	if (mm->core_state || !get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		put_cred(cred);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0) {
		put_cred(cred);
		goto fail;
	}

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl,
	 * which uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, signr);
	unlock_kernel();

	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;

	if (ispipe) {
		if (core_limit == 0) {
			/*
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * core_limit of 0 here as a special value. Any
			 * non-zero limit gets set to RLIM_INFINITY below, but
			 * a limit of 0 skips the dump. This is a consistent
			 * way to catch recursive crashes. We can still crash
			 * if the core_pattern binary sets RLIM_CORE = !0
			 * but it runs as root, and can do lots of stupid things.
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 0\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_dropcount;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_dropcount;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually I see no reason not to allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	/*
	 * Don't allow local users to get cute and trick others into
	 * coredumping into their pre-created files:
	 */
	if (inode->i_uid != current_fsuid())
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file, core_limit);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(file);
	filp_close(file, NULL);
fail_dropcount:
	if (dump_count)
		atomic_dec(&core_dump_count);
fail_unlock:
	if (helper_argv)
		argv_free(helper_argv);

	revert_creds(old_cred);
	put_cred(cred);
	coredump_finish(mm);
fail:
	return;
}