pid namespaces: allow cloning of new namespace
1 /*
2 * linux/fs/proc/base.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * proc base directory handling functions
7 *
8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
9 * Instead of using magical inumbers to determine the kind of object
10 * we allocate and fill in-core inodes upon lookup. They don't even
11 * go into icache. We cache the reference to task_struct upon lookup too.
12 * Eventually it should become a filesystem in its own right. We don't use the
13 * rest of procfs anymore.
14 *
15 *
16 * Changelog:
17 * 17-Jan-2005
18 * Allan Bezerra
19 * Bruna Moreira <bruna.moreira@indt.org.br>
20 * Edjard Mota <edjard.mota@indt.org.br>
21 * Ilias Biris <ilias.biris@indt.org.br>
22 * Mauricio Lin <mauricio.lin@indt.org.br>
23 *
24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
25 *
26 * A new process specific entry (smaps) included in /proc. It shows the
27 * size of rss for each memory area. The maps entry lacks information
28 * about physical memory size (rss) for each mapped file, i.e.,
29 * rss information for executables and library files.
30 * This additional information is useful for any tools that need to know
31 * about physical memory consumption for a process-specific library.
32 *
33 * Changelog:
34 * 21-Feb-2005
35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
36 * Pud inclusion in the page table walking.
37 *
38 * ChangeLog:
39 * 10-Mar-2005
40 * 10LE Instituto Nokia de Tecnologia - INdT:
41 * A better way to walk through the page table, as suggested by Hugh Dickins.
42 *
43 * Simo Piiroinen <simo.piiroinen@nokia.com>:
44 * Smaps information related to shared, private, clean and dirty pages.
45 *
46 * Paul Mundt <paul.mundt@nokia.com>:
47 * Overall revision about smaps.
48 */
49
50 #include <asm/uaccess.h>
51
52 #include <linux/errno.h>
53 #include <linux/time.h>
54 #include <linux/proc_fs.h>
55 #include <linux/stat.h>
56 #include <linux/init.h>
57 #include <linux/capability.h>
58 #include <linux/file.h>
59 #include <linux/string.h>
60 #include <linux/seq_file.h>
61 #include <linux/namei.h>
62 #include <linux/mnt_namespace.h>
63 #include <linux/mm.h>
64 #include <linux/rcupdate.h>
65 #include <linux/kallsyms.h>
66 #include <linux/module.h>
67 #include <linux/mount.h>
68 #include <linux/security.h>
69 #include <linux/ptrace.h>
70 #include <linux/cgroup.h>
71 #include <linux/cpuset.h>
72 #include <linux/audit.h>
73 #include <linux/poll.h>
74 #include <linux/nsproxy.h>
75 #include <linux/oom.h>
76 #include <linux/elf.h>
77 #include <linux/pid_namespace.h>
78 #include "internal.h"
79
80 /* NOTE:
81 * Implementing inode permission operations in /proc is almost
82 * certainly an error. Permission checks need to happen during
83 * each system call, not at open time. The reason is that most of
84 * what we wish to check for permissions in /proc varies at runtime.
85 *
86 * The classic example of a problem is opening file descriptors
87 * in /proc for a task before it execs a suid executable.
88 */
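/*
 * For illustration (a user-space sketch, not code from this file), the race
 * the NOTE above warns about looks roughly like:
 *
 *	fd = open("/proc/<pid>/mem", O_RDONLY);   target still unprivileged
 *	(target execs a setuid binary)
 *	read(fd, buf, sizeof(buf));               must be refused at read time
 *
 * This is why mem_read() and mem_write() below re-check MAY_PTRACE() and
 * ptrace_may_attach() on every access instead of trusting the open().
 */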
89
90
91 /* Worst case buffer size needed for holding an integer. */
92 #define PROC_NUMBUF 13
93
94 struct pid_entry {
95 char *name;
96 int len;
97 mode_t mode;
98 const struct inode_operations *iop;
99 const struct file_operations *fop;
100 union proc_op op;
101 };
102
103 #define NOD(NAME, MODE, IOP, FOP, OP) { \
104 .name = (NAME), \
105 .len = sizeof(NAME) - 1, \
106 .mode = MODE, \
107 .iop = IOP, \
108 .fop = FOP, \
109 .op = OP, \
110 }
111
112 #define DIR(NAME, MODE, OTYPE) \
113 NOD(NAME, (S_IFDIR|(MODE)), \
114 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \
115 {} )
116 #define LNK(NAME, OTYPE) \
117 NOD(NAME, (S_IFLNK|S_IRWXUGO), \
118 &proc_pid_link_inode_operations, NULL, \
119 { .proc_get_link = &proc_##OTYPE##_link } )
120 #define REG(NAME, MODE, OTYPE) \
121 NOD(NAME, (S_IFREG|(MODE)), NULL, \
122 &proc_##OTYPE##_operations, {})
123 #define INF(NAME, MODE, OTYPE) \
124 NOD(NAME, (S_IFREG|(MODE)), \
125 NULL, &proc_info_file_operations, \
126 { .proc_read = &proc_##OTYPE } )
127
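/*
 * As an example of how the table macros above expand, an entry such as
 *	INF("cmdline", S_IRUGO, pid_cmdline)
 * yields a pid_entry served by proc_info_file_operations whose
 * ->op.proc_read points at proc_pid_cmdline().
 */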
128 int maps_protect;
129 EXPORT_SYMBOL(maps_protect);
130
131 static struct fs_struct *get_fs_struct(struct task_struct *task)
132 {
133 struct fs_struct *fs;
134 task_lock(task);
135 fs = task->fs;
136 if(fs)
137 atomic_inc(&fs->count);
138 task_unlock(task);
139 return fs;
140 }
141
142 static int get_nr_threads(struct task_struct *tsk)
143 {
144 /* Must be called with the rcu_read_lock held */
145 unsigned long flags;
146 int count = 0;
147
148 if (lock_task_sighand(tsk, &flags)) {
149 count = atomic_read(&tsk->signal->count);
150 unlock_task_sighand(tsk, &flags);
151 }
152 return count;
153 }
154
155 static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
156 {
157 struct task_struct *task = get_proc_task(inode);
158 struct fs_struct *fs = NULL;
159 int result = -ENOENT;
160
161 if (task) {
162 fs = get_fs_struct(task);
163 put_task_struct(task);
164 }
165 if (fs) {
166 read_lock(&fs->lock);
167 *mnt = mntget(fs->pwdmnt);
168 *dentry = dget(fs->pwd);
169 read_unlock(&fs->lock);
170 result = 0;
171 put_fs_struct(fs);
172 }
173 return result;
174 }
175
176 static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
177 {
178 struct task_struct *task = get_proc_task(inode);
179 struct fs_struct *fs = NULL;
180 int result = -ENOENT;
181
182 if (task) {
183 fs = get_fs_struct(task);
184 put_task_struct(task);
185 }
186 if (fs) {
187 read_lock(&fs->lock);
188 *mnt = mntget(fs->rootmnt);
189 *dentry = dget(fs->root);
190 read_unlock(&fs->lock);
191 result = 0;
192 put_fs_struct(fs);
193 }
194 return result;
195 }
196
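/*
 * MAY_PTRACE(task) holds when the caller is the task itself, or is its
 * ptracing parent while the task is stopped/traced and the security hook
 * agrees; it is re-evaluated on each /proc/<pid>/mem access below.
 */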
197 #define MAY_PTRACE(task) \
198 (task == current || \
199 (task->parent == current && \
200 (task->ptrace & PT_PTRACED) && \
201 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
202 security_ptrace(current,task) == 0))
203
204 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
205 {
206 int res = 0;
207 unsigned int len;
208 struct mm_struct *mm = get_task_mm(task);
209 if (!mm)
210 goto out;
211 if (!mm->arg_end)
212 goto out_mm; /* Shh! No looking before we're done */
213
214 len = mm->arg_end - mm->arg_start;
215
216 if (len > PAGE_SIZE)
217 len = PAGE_SIZE;
218
219 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
220
221 // If the nul at the end of args has been overwritten, then
222 // assume application is using setproctitle(3).
223 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
224 len = strnlen(buffer, res);
225 if (len < res) {
226 res = len;
227 } else {
228 len = mm->env_end - mm->env_start;
229 if (len > PAGE_SIZE - res)
230 len = PAGE_SIZE - res;
231 res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
232 res = strnlen(buffer, res);
233 }
234 }
235 out_mm:
236 mmput(mm);
237 out:
238 return res;
239 }
240
241 static int proc_pid_auxv(struct task_struct *task, char *buffer)
242 {
243 int res = 0;
244 struct mm_struct *mm = get_task_mm(task);
245 if (mm) {
246 unsigned int nwords = 0;
247 do
248 nwords += 2;
249 while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
250 res = nwords * sizeof(mm->saved_auxv[0]);
251 if (res > PAGE_SIZE)
252 res = PAGE_SIZE;
253 memcpy(buffer, mm->saved_auxv, res);
254 mmput(mm);
255 }
256 return res;
257 }
258
259
260 #ifdef CONFIG_KALLSYMS
261 /*
262 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
263 * Returns the resolved symbol; if that fails, it simply returns the address.
264 */
265 static int proc_pid_wchan(struct task_struct *task, char *buffer)
266 {
267 unsigned long wchan;
268 char symname[KSYM_NAME_LEN];
269
270 wchan = get_wchan(task);
271
272 if (lookup_symbol_name(wchan, symname) < 0)
273 return sprintf(buffer, "%lu", wchan);
274 else
275 return sprintf(buffer, "%s", symname);
276 }
277 #endif /* CONFIG_KALLSYMS */
278
279 #ifdef CONFIG_SCHEDSTATS
280 /*
281 * Provides /proc/PID/schedstat
282 */
283 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
284 {
285 return sprintf(buffer, "%llu %llu %lu\n",
286 task->sched_info.cpu_time,
287 task->sched_info.run_delay,
288 task->sched_info.pcount);
289 }
290 #endif
291
292 /* The badness from the OOM killer */
293 unsigned long badness(struct task_struct *p, unsigned long uptime);
294 static int proc_oom_score(struct task_struct *task, char *buffer)
295 {
296 unsigned long points;
297 struct timespec uptime;
298
299 do_posix_clock_monotonic_gettime(&uptime);
300 read_lock(&tasklist_lock);
301 points = badness(task, uptime.tv_sec);
302 read_unlock(&tasklist_lock);
303 return sprintf(buffer, "%lu\n", points);
304 }
305
306 /************************************************************************/
307 /* Here the fs part begins */
308 /************************************************************************/
309
310 /* permission checks */
311 static int proc_fd_access_allowed(struct inode *inode)
312 {
313 struct task_struct *task;
314 int allowed = 0;
315 /* Allow access to a task's file descriptors if it is us or if we
316 * may use ptrace to attach to the process and find out that
317 * information.
318 */
319 task = get_proc_task(inode);
320 if (task) {
321 allowed = ptrace_may_attach(task);
322 put_task_struct(task);
323 }
324 return allowed;
325 }
326
327 static int proc_setattr(struct dentry *dentry, struct iattr *attr)
328 {
329 int error;
330 struct inode *inode = dentry->d_inode;
331
332 if (attr->ia_valid & ATTR_MODE)
333 return -EPERM;
334
335 error = inode_change_ok(inode, attr);
336 if (!error)
337 error = inode_setattr(inode, attr);
338 return error;
339 }
340
341 static const struct inode_operations proc_def_inode_operations = {
342 .setattr = proc_setattr,
343 };
344
345 extern struct seq_operations mounts_op;
346 struct proc_mounts {
347 struct seq_file m;
348 int event;
349 };
350
351 static int mounts_open(struct inode *inode, struct file *file)
352 {
353 struct task_struct *task = get_proc_task(inode);
354 struct nsproxy *nsp;
355 struct mnt_namespace *ns = NULL;
356 struct proc_mounts *p;
357 int ret = -EINVAL;
358
359 if (task) {
360 rcu_read_lock();
361 nsp = task_nsproxy(task);
362 if (nsp) {
363 ns = nsp->mnt_ns;
364 if (ns)
365 get_mnt_ns(ns);
366 }
367 rcu_read_unlock();
368
369 put_task_struct(task);
370 }
371
372 if (ns) {
373 ret = -ENOMEM;
374 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
375 if (p) {
376 file->private_data = &p->m;
377 ret = seq_open(file, &mounts_op);
378 if (!ret) {
379 p->m.private = ns;
380 p->event = ns->event;
381 return 0;
382 }
383 kfree(p);
384 }
385 put_mnt_ns(ns);
386 }
387 return ret;
388 }
389
390 static int mounts_release(struct inode *inode, struct file *file)
391 {
392 struct seq_file *m = file->private_data;
393 struct mnt_namespace *ns = m->private;
394 put_mnt_ns(ns);
395 return seq_release(inode, file);
396 }
397
398 static unsigned mounts_poll(struct file *file, poll_table *wait)
399 {
400 struct proc_mounts *p = file->private_data;
401 struct mnt_namespace *ns = p->m.private;
402 unsigned res = 0;
403
404 poll_wait(file, &ns->poll, wait);
405
406 spin_lock(&vfsmount_lock);
407 if (p->event != ns->event) {
408 p->event = ns->event;
409 res = POLLERR;
410 }
411 spin_unlock(&vfsmount_lock);
412
413 return res;
414 }
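/*
 * A minimal user-space sketch (illustrative, not from this file) of using
 * the poll support above to watch for mount-table changes:
 *
 *	struct pollfd pfd = {
 *		.fd = open("/proc/self/mounts", O_RDONLY),
 *	};
 *	poll(&pfd, 1, -1);	revents gains POLLERR once ns->event changes
 */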
415
416 static const struct file_operations proc_mounts_operations = {
417 .open = mounts_open,
418 .read = seq_read,
419 .llseek = seq_lseek,
420 .release = mounts_release,
421 .poll = mounts_poll,
422 };
423
424 extern struct seq_operations mountstats_op;
425 static int mountstats_open(struct inode *inode, struct file *file)
426 {
427 int ret = seq_open(file, &mountstats_op);
428
429 if (!ret) {
430 struct seq_file *m = file->private_data;
431 struct nsproxy *nsp;
432 struct mnt_namespace *mnt_ns = NULL;
433 struct task_struct *task = get_proc_task(inode);
434
435 if (task) {
436 rcu_read_lock();
437 nsp = task_nsproxy(task);
438 if (nsp) {
439 mnt_ns = nsp->mnt_ns;
440 if (mnt_ns)
441 get_mnt_ns(mnt_ns);
442 }
443 rcu_read_unlock();
444
445 put_task_struct(task);
446 }
447
448 if (mnt_ns)
449 m->private = mnt_ns;
450 else {
451 seq_release(inode, file);
452 ret = -EINVAL;
453 }
454 }
455 return ret;
456 }
457
458 static const struct file_operations proc_mountstats_operations = {
459 .open = mountstats_open,
460 .read = seq_read,
461 .llseek = seq_lseek,
462 .release = mounts_release,
463 };
464
465 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
466
467 static ssize_t proc_info_read(struct file * file, char __user * buf,
468 size_t count, loff_t *ppos)
469 {
470 struct inode * inode = file->f_path.dentry->d_inode;
471 unsigned long page;
472 ssize_t length;
473 struct task_struct *task = get_proc_task(inode);
474
475 length = -ESRCH;
476 if (!task)
477 goto out_no_task;
478
479 if (count > PROC_BLOCK_SIZE)
480 count = PROC_BLOCK_SIZE;
481
482 length = -ENOMEM;
483 if (!(page = __get_free_page(GFP_TEMPORARY)))
484 goto out;
485
486 length = PROC_I(inode)->op.proc_read(task, (char*)page);
487
488 if (length >= 0)
489 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
490 free_page(page);
491 out:
492 put_task_struct(task);
493 out_no_task:
494 return length;
495 }
496
497 static const struct file_operations proc_info_file_operations = {
498 .read = proc_info_read,
499 };
500
501 static int mem_open(struct inode* inode, struct file* file)
502 {
503 file->private_data = (void*)((long)current->self_exec_id);
504 return 0;
505 }
506
507 static ssize_t mem_read(struct file * file, char __user * buf,
508 size_t count, loff_t *ppos)
509 {
510 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
511 char *page;
512 unsigned long src = *ppos;
513 int ret = -ESRCH;
514 struct mm_struct *mm;
515
516 if (!task)
517 goto out_no_task;
518
519 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
520 goto out;
521
522 ret = -ENOMEM;
523 page = (char *)__get_free_page(GFP_TEMPORARY);
524 if (!page)
525 goto out;
526
527 ret = 0;
528
529 mm = get_task_mm(task);
530 if (!mm)
531 goto out_free;
532
533 ret = -EIO;
534
535 if (file->private_data != (void*)((long)current->self_exec_id))
536 goto out_put;
537
538 ret = 0;
539
540 while (count > 0) {
541 int this_len, retval;
542
543 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
544 retval = access_process_vm(task, src, page, this_len, 0);
545 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
546 if (!ret)
547 ret = -EIO;
548 break;
549 }
550
551 if (copy_to_user(buf, page, retval)) {
552 ret = -EFAULT;
553 break;
554 }
555
556 ret += retval;
557 src += retval;
558 buf += retval;
559 count -= retval;
560 }
561 *ppos = src;
562
563 out_put:
564 mmput(mm);
565 out_free:
566 free_page((unsigned long) page);
567 out:
568 put_task_struct(task);
569 out_no_task:
570 return ret;
571 }
572
573 #define mem_write NULL
574
575 #ifndef mem_write
576 /* This is a security hazard */
577 static ssize_t mem_write(struct file * file, const char __user *buf,
578 size_t count, loff_t *ppos)
579 {
580 int copied;
581 char *page;
582 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
583 unsigned long dst = *ppos;
584
585 copied = -ESRCH;
586 if (!task)
587 goto out_no_task;
588
589 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
590 goto out;
591
592 copied = -ENOMEM;
593 page = (char *)__get_free_page(GFP_TEMPORARY);
594 if (!page)
595 goto out;
596
597 copied = 0;
598 while (count > 0) {
599 int this_len, retval;
600
601 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
602 if (copy_from_user(page, buf, this_len)) {
603 copied = -EFAULT;
604 break;
605 }
606 retval = access_process_vm(task, dst, page, this_len, 1);
607 if (!retval) {
608 if (!copied)
609 copied = -EIO;
610 break;
611 }
612 copied += retval;
613 buf += retval;
614 dst += retval;
615 count -= retval;
616 }
617 *ppos = dst;
618 free_page((unsigned long) page);
619 out:
620 put_task_struct(task);
621 out_no_task:
622 return copied;
623 }
624 #endif
625
626 static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
627 {
628 switch (orig) {
629 case 0:
630 file->f_pos = offset;
631 break;
632 case 1:
633 file->f_pos += offset;
634 break;
635 default:
636 return -EINVAL;
637 }
638 force_successful_syscall_return();
639 return file->f_pos;
640 }
641
642 static const struct file_operations proc_mem_operations = {
643 .llseek = mem_lseek,
644 .read = mem_read,
645 .write = mem_write,
646 .open = mem_open,
647 };
648
649 static ssize_t environ_read(struct file *file, char __user *buf,
650 size_t count, loff_t *ppos)
651 {
652 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
653 char *page;
654 unsigned long src = *ppos;
655 int ret = -ESRCH;
656 struct mm_struct *mm;
657
658 if (!task)
659 goto out_no_task;
660
661 if (!ptrace_may_attach(task))
662 goto out;
663
664 ret = -ENOMEM;
665 page = (char *)__get_free_page(GFP_TEMPORARY);
666 if (!page)
667 goto out;
668
669 ret = 0;
670
671 mm = get_task_mm(task);
672 if (!mm)
673 goto out_free;
674
675 while (count > 0) {
676 int this_len, retval, max_len;
677
678 this_len = mm->env_end - (mm->env_start + src);
679
680 if (this_len <= 0)
681 break;
682
683 max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
684 this_len = (this_len > max_len) ? max_len : this_len;
685
686 retval = access_process_vm(task, (mm->env_start + src),
687 page, this_len, 0);
688
689 if (retval <= 0) {
690 ret = retval;
691 break;
692 }
693
694 if (copy_to_user(buf, page, retval)) {
695 ret = -EFAULT;
696 break;
697 }
698
699 ret += retval;
700 src += retval;
701 buf += retval;
702 count -= retval;
703 }
704 *ppos = src;
705
706 mmput(mm);
707 out_free:
708 free_page((unsigned long) page);
709 out:
710 put_task_struct(task);
711 out_no_task:
712 return ret;
713 }
714
715 static const struct file_operations proc_environ_operations = {
716 .read = environ_read,
717 };
718
719 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
720 size_t count, loff_t *ppos)
721 {
722 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
723 char buffer[PROC_NUMBUF];
724 size_t len;
725 int oom_adjust;
726
727 if (!task)
728 return -ESRCH;
729 oom_adjust = task->oomkilladj;
730 put_task_struct(task);
731
732 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
733
734 return simple_read_from_buffer(buf, count, ppos, buffer, len);
735 }
736
737 static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
738 size_t count, loff_t *ppos)
739 {
740 struct task_struct *task;
741 char buffer[PROC_NUMBUF], *end;
742 int oom_adjust;
743
744 memset(buffer, 0, sizeof(buffer));
745 if (count > sizeof(buffer) - 1)
746 count = sizeof(buffer) - 1;
747 if (copy_from_user(buffer, buf, count))
748 return -EFAULT;
749 oom_adjust = simple_strtol(buffer, &end, 0);
750 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
751 oom_adjust != OOM_DISABLE)
752 return -EINVAL;
753 if (*end == '\n')
754 end++;
755 task = get_proc_task(file->f_path.dentry->d_inode);
756 if (!task)
757 return -ESRCH;
758 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
759 put_task_struct(task);
760 return -EACCES;
761 }
762 task->oomkilladj = oom_adjust;
763 put_task_struct(task);
764 if (end - buffer == 0)
765 return -EIO;
766 return end - buffer;
767 }
768
769 static const struct file_operations proc_oom_adjust_operations = {
770 .read = oom_adjust_read,
771 .write = oom_adjust_write,
772 };
773
774 #ifdef CONFIG_MMU
775 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
776 size_t count, loff_t *ppos)
777 {
778 struct task_struct *task;
779 char buffer[PROC_NUMBUF], *end;
780 struct mm_struct *mm;
781
782 memset(buffer, 0, sizeof(buffer));
783 if (count > sizeof(buffer) - 1)
784 count = sizeof(buffer) - 1;
785 if (copy_from_user(buffer, buf, count))
786 return -EFAULT;
787 if (!simple_strtol(buffer, &end, 0))
788 return -EINVAL;
789 if (*end == '\n')
790 end++;
791 task = get_proc_task(file->f_path.dentry->d_inode);
792 if (!task)
793 return -ESRCH;
794 mm = get_task_mm(task);
795 if (mm) {
796 clear_refs_smap(mm);
797 mmput(mm);
798 }
799 put_task_struct(task);
800 if (end - buffer == 0)
801 return -EIO;
802 return end - buffer;
803 }
804
805 static struct file_operations proc_clear_refs_operations = {
806 .write = clear_refs_write,
807 };
808 #endif
809
810 #ifdef CONFIG_AUDITSYSCALL
811 #define TMPBUFLEN 21
812 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
813 size_t count, loff_t *ppos)
814 {
815 struct inode * inode = file->f_path.dentry->d_inode;
816 struct task_struct *task = get_proc_task(inode);
817 ssize_t length;
818 char tmpbuf[TMPBUFLEN];
819
820 if (!task)
821 return -ESRCH;
822 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
823 audit_get_loginuid(task->audit_context));
824 put_task_struct(task);
825 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
826 }
827
828 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
829 size_t count, loff_t *ppos)
830 {
831 struct inode * inode = file->f_path.dentry->d_inode;
832 char *page, *tmp;
833 ssize_t length;
834 uid_t loginuid;
835
836 if (!capable(CAP_AUDIT_CONTROL))
837 return -EPERM;
838
839 if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
840 return -EPERM;
841
842 if (count >= PAGE_SIZE)
843 count = PAGE_SIZE - 1;
844
845 if (*ppos != 0) {
846 /* No partial writes. */
847 return -EINVAL;
848 }
849 page = (char*)__get_free_page(GFP_TEMPORARY);
850 if (!page)
851 return -ENOMEM;
852 length = -EFAULT;
853 if (copy_from_user(page, buf, count))
854 goto out_free_page;
855
856 page[count] = '\0';
857 loginuid = simple_strtoul(page, &tmp, 10);
858 if (tmp == page) {
859 length = -EINVAL;
860 goto out_free_page;
861
862 }
863 length = audit_set_loginuid(current, loginuid);
864 if (likely(length == 0))
865 length = count;
866
867 out_free_page:
868 free_page((unsigned long) page);
869 return length;
870 }
871
872 static const struct file_operations proc_loginuid_operations = {
873 .read = proc_loginuid_read,
874 .write = proc_loginuid_write,
875 };
876 #endif
877
878 #ifdef CONFIG_FAULT_INJECTION
879 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
880 size_t count, loff_t *ppos)
881 {
882 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
883 char buffer[PROC_NUMBUF];
884 size_t len;
885 int make_it_fail;
886
887 if (!task)
888 return -ESRCH;
889 make_it_fail = task->make_it_fail;
890 put_task_struct(task);
891
892 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
893
894 return simple_read_from_buffer(buf, count, ppos, buffer, len);
895 }
896
897 static ssize_t proc_fault_inject_write(struct file * file,
898 const char __user * buf, size_t count, loff_t *ppos)
899 {
900 struct task_struct *task;
901 char buffer[PROC_NUMBUF], *end;
902 int make_it_fail;
903
904 if (!capable(CAP_SYS_RESOURCE))
905 return -EPERM;
906 memset(buffer, 0, sizeof(buffer));
907 if (count > sizeof(buffer) - 1)
908 count = sizeof(buffer) - 1;
909 if (copy_from_user(buffer, buf, count))
910 return -EFAULT;
911 make_it_fail = simple_strtol(buffer, &end, 0);
912 if (*end == '\n')
913 end++;
914 task = get_proc_task(file->f_dentry->d_inode);
915 if (!task)
916 return -ESRCH;
917 task->make_it_fail = make_it_fail;
918 put_task_struct(task);
919 if (end - buffer == 0)
920 return -EIO;
921 return end - buffer;
922 }
923
924 static const struct file_operations proc_fault_inject_operations = {
925 .read = proc_fault_inject_read,
926 .write = proc_fault_inject_write,
927 };
928 #endif
929
930 #ifdef CONFIG_SCHED_DEBUG
931 /*
932 * Print out various scheduling related per-task fields:
933 */
934 static int sched_show(struct seq_file *m, void *v)
935 {
936 struct inode *inode = m->private;
937 struct task_struct *p;
938
939 WARN_ON(!inode);
940
941 p = get_proc_task(inode);
942 if (!p)
943 return -ESRCH;
944 proc_sched_show_task(p, m);
945
946 put_task_struct(p);
947
948 return 0;
949 }
950
951 static ssize_t
952 sched_write(struct file *file, const char __user *buf,
953 size_t count, loff_t *offset)
954 {
955 struct inode *inode = file->f_path.dentry->d_inode;
956 struct task_struct *p;
957
958 WARN_ON(!inode);
959
960 p = get_proc_task(inode);
961 if (!p)
962 return -ESRCH;
963 proc_sched_set_task(p);
964
965 put_task_struct(p);
966
967 return count;
968 }
969
970 static int sched_open(struct inode *inode, struct file *filp)
971 {
972 int ret;
973
974 ret = single_open(filp, sched_show, NULL);
975 if (!ret) {
976 struct seq_file *m = filp->private_data;
977
978 m->private = inode;
979 }
980 return ret;
981 }
982
983 static const struct file_operations proc_pid_sched_operations = {
984 .open = sched_open,
985 .read = seq_read,
986 .write = sched_write,
987 .llseek = seq_lseek,
988 .release = single_release,
989 };
990
991 #endif
992
993 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
994 {
995 struct inode *inode = dentry->d_inode;
996 int error = -EACCES;
997
998 /* We don't need a base pointer in the /proc filesystem */
999 path_release(nd);
1000
1001 /* Are we allowed to snoop on the tasks file descriptors? */
1002 if (!proc_fd_access_allowed(inode))
1003 goto out;
1004
1005 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
1006 nd->last_type = LAST_BIND;
1007 out:
1008 return ERR_PTR(error);
1009 }
1010
1011 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
1012 char __user *buffer, int buflen)
1013 {
1014 struct inode * inode;
1015 char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
1016 char *path;
1017 int len;
1018
1019 if (!tmp)
1020 return -ENOMEM;
1021
1022 inode = dentry->d_inode;
1023 path = d_path(dentry, mnt, tmp, PAGE_SIZE);
1024 len = PTR_ERR(path);
1025 if (IS_ERR(path))
1026 goto out;
1027 len = tmp + PAGE_SIZE - 1 - path;
1028
1029 if (len > buflen)
1030 len = buflen;
1031 if (copy_to_user(buffer, path, len))
1032 len = -EFAULT;
1033 out:
1034 free_page((unsigned long)tmp);
1035 return len;
1036 }
1037
1038 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1039 {
1040 int error = -EACCES;
1041 struct inode *inode = dentry->d_inode;
1042 struct dentry *de;
1043 struct vfsmount *mnt = NULL;
1044
1045 /* Are we allowed to snoop on the tasks file descriptors? */
1046 if (!proc_fd_access_allowed(inode))
1047 goto out;
1048
1049 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
1050 if (error)
1051 goto out;
1052
1053 error = do_proc_readlink(de, mnt, buffer, buflen);
1054 dput(de);
1055 mntput(mnt);
1056 out:
1057 return error;
1058 }
1059
1060 static const struct inode_operations proc_pid_link_inode_operations = {
1061 .readlink = proc_pid_readlink,
1062 .follow_link = proc_pid_follow_link,
1063 .setattr = proc_setattr,
1064 };
1065
1066
1067 /* building an inode */
1068
1069 static int task_dumpable(struct task_struct *task)
1070 {
1071 int dumpable = 0;
1072 struct mm_struct *mm;
1073
1074 task_lock(task);
1075 mm = task->mm;
1076 if (mm)
1077 dumpable = get_dumpable(mm);
1078 task_unlock(task);
1079 if(dumpable == 1)
1080 return 1;
1081 return 0;
1082 }
1083
1084
1085 static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
1086 {
1087 struct inode * inode;
1088 struct proc_inode *ei;
1089
1090 /* We need a new inode */
1091
1092 inode = new_inode(sb);
1093 if (!inode)
1094 goto out;
1095
1096 /* Common stuff */
1097 ei = PROC_I(inode);
1098 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1099 inode->i_op = &proc_def_inode_operations;
1100
1101 /*
1102 * grab the reference to task.
1103 */
1104 ei->pid = get_task_pid(task, PIDTYPE_PID);
1105 if (!ei->pid)
1106 goto out_unlock;
1107
1108 inode->i_uid = 0;
1109 inode->i_gid = 0;
1110 if (task_dumpable(task)) {
1111 inode->i_uid = task->euid;
1112 inode->i_gid = task->egid;
1113 }
1114 security_task_to_inode(task, inode);
1115
1116 out:
1117 return inode;
1118
1119 out_unlock:
1120 iput(inode);
1121 return NULL;
1122 }
1123
1124 static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1125 {
1126 struct inode *inode = dentry->d_inode;
1127 struct task_struct *task;
1128 generic_fillattr(inode, stat);
1129
1130 rcu_read_lock();
1131 stat->uid = 0;
1132 stat->gid = 0;
1133 task = pid_task(proc_pid(inode), PIDTYPE_PID);
1134 if (task) {
1135 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1136 task_dumpable(task)) {
1137 stat->uid = task->euid;
1138 stat->gid = task->egid;
1139 }
1140 }
1141 rcu_read_unlock();
1142 return 0;
1143 }
1144
1145 /* dentry stuff */
1146
1147 /*
1148 * Exceptional case: normally we are not allowed to unhash a busy
1149 * directory. In this case, however, we can do it - no aliasing problems
1150 * due to the way we treat inodes.
1151 *
1152 * Rewrite the inode's ownerships here because the owning task may have
1153 * performed a setuid(), etc.
1154 *
1155 * Before the /proc/pid/status file was created the only way to read
1156 * the effective uid of a process was to stat /proc/pid. Reading
1157 * /proc/pid/status is slow enough that procps and other packages
1158 * kept stating /proc/pid. To keep the rules in /proc simple I have
1159 * made this apply to all per process world readable and executable
1160 * directories.
1161 */
1162 static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1163 {
1164 struct inode *inode = dentry->d_inode;
1165 struct task_struct *task = get_proc_task(inode);
1166 if (task) {
1167 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1168 task_dumpable(task)) {
1169 inode->i_uid = task->euid;
1170 inode->i_gid = task->egid;
1171 } else {
1172 inode->i_uid = 0;
1173 inode->i_gid = 0;
1174 }
1175 inode->i_mode &= ~(S_ISUID | S_ISGID);
1176 security_task_to_inode(task, inode);
1177 put_task_struct(task);
1178 return 1;
1179 }
1180 d_drop(dentry);
1181 return 0;
1182 }
1183
1184 static int pid_delete_dentry(struct dentry * dentry)
1185 {
1186 /* Is the task we represent dead?
1187 * If so, then don't put the dentry on the lru list,
1188 * kill it immediately.
1189 */
1190 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1191 }
1192
1193 static struct dentry_operations pid_dentry_operations =
1194 {
1195 .d_revalidate = pid_revalidate,
1196 .d_delete = pid_delete_dentry,
1197 };
1198
1199 /* Lookups */
1200
1201 typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
1202 struct task_struct *, const void *);
1203
1204 /*
1205 * Fill a directory entry.
1206 *
1207 * If possible create the dcache entry and derive our inode number and
1208 * file type from dcache entry.
1209 *
1210 * Since all of the proc inode numbers are dynamically generated, the inode
1211 * numbers do not exist until the inode is cached. This means creating
1212 * the dcache entry in readdir is necessary to keep the inode numbers
1213 * reported by readdir in sync with the inode numbers reported
1214 * by stat.
1215 */
1216 static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1217 char *name, int len,
1218 instantiate_t instantiate, struct task_struct *task, const void *ptr)
1219 {
1220 struct dentry *child, *dir = filp->f_path.dentry;
1221 struct inode *inode;
1222 struct qstr qname;
1223 ino_t ino = 0;
1224 unsigned type = DT_UNKNOWN;
1225
1226 qname.name = name;
1227 qname.len = len;
1228 qname.hash = full_name_hash(name, len);
1229
1230 child = d_lookup(dir, &qname);
1231 if (!child) {
1232 struct dentry *new;
1233 new = d_alloc(dir, &qname);
1234 if (new) {
1235 child = instantiate(dir->d_inode, new, task, ptr);
1236 if (child)
1237 dput(new);
1238 else
1239 child = new;
1240 }
1241 }
1242 if (!child || IS_ERR(child) || !child->d_inode)
1243 goto end_instantiate;
1244 inode = child->d_inode;
1245 if (inode) {
1246 ino = inode->i_ino;
1247 type = inode->i_mode >> 12;
1248 }
1249 dput(child);
1250 end_instantiate:
1251 if (!ino)
1252 ino = find_inode_number(dir, &qname);
1253 if (!ino)
1254 ino = 1;
1255 return filldir(dirent, name, len, filp->f_pos, ino, type);
1256 }
1257
1258 static unsigned name_to_int(struct dentry *dentry)
1259 {
1260 const char *name = dentry->d_name.name;
1261 int len = dentry->d_name.len;
1262 unsigned n = 0;
1263
1264 if (len > 1 && *name == '0')
1265 goto out;
1266 while (len-- > 0) {
1267 unsigned c = *name++ - '0';
1268 if (c > 9)
1269 goto out;
1270 if (n >= (~0U-9)/10)
1271 goto out;
1272 n *= 10;
1273 n += c;
1274 }
1275 return n;
1276 out:
1277 return ~0U;
1278 }
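/*
 * For example, name_to_int() maps a dentry named "42" to 42, while "042",
 * "4x" and overflowing strings all map to ~0U, which callers treat as
 * "not a numeric name".
 */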
1279
1280 #define PROC_FDINFO_MAX 64
1281
1282 static int proc_fd_info(struct inode *inode, struct dentry **dentry,
1283 struct vfsmount **mnt, char *info)
1284 {
1285 struct task_struct *task = get_proc_task(inode);
1286 struct files_struct *files = NULL;
1287 struct file *file;
1288 int fd = proc_fd(inode);
1289
1290 if (task) {
1291 files = get_files_struct(task);
1292 put_task_struct(task);
1293 }
1294 if (files) {
1295 /*
1296 * We are not taking a ref to the file structure, so we must
1297 * hold ->file_lock.
1298 */
1299 spin_lock(&files->file_lock);
1300 file = fcheck_files(files, fd);
1301 if (file) {
1302 if (mnt)
1303 *mnt = mntget(file->f_path.mnt);
1304 if (dentry)
1305 *dentry = dget(file->f_path.dentry);
1306 if (info)
1307 snprintf(info, PROC_FDINFO_MAX,
1308 "pos:\t%lli\n"
1309 "flags:\t0%o\n",
1310 (long long) file->f_pos,
1311 file->f_flags);
1312 spin_unlock(&files->file_lock);
1313 put_files_struct(files);
1314 return 0;
1315 }
1316 spin_unlock(&files->file_lock);
1317 put_files_struct(files);
1318 }
1319 return -ENOENT;
1320 }
1321
1322 static int proc_fd_link(struct inode *inode, struct dentry **dentry,
1323 struct vfsmount **mnt)
1324 {
1325 return proc_fd_info(inode, dentry, mnt, NULL);
1326 }
1327
1328 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1329 {
1330 struct inode *inode = dentry->d_inode;
1331 struct task_struct *task = get_proc_task(inode);
1332 int fd = proc_fd(inode);
1333 struct files_struct *files;
1334
1335 if (task) {
1336 files = get_files_struct(task);
1337 if (files) {
1338 rcu_read_lock();
1339 if (fcheck_files(files, fd)) {
1340 rcu_read_unlock();
1341 put_files_struct(files);
1342 if (task_dumpable(task)) {
1343 inode->i_uid = task->euid;
1344 inode->i_gid = task->egid;
1345 } else {
1346 inode->i_uid = 0;
1347 inode->i_gid = 0;
1348 }
1349 inode->i_mode &= ~(S_ISUID | S_ISGID);
1350 security_task_to_inode(task, inode);
1351 put_task_struct(task);
1352 return 1;
1353 }
1354 rcu_read_unlock();
1355 put_files_struct(files);
1356 }
1357 put_task_struct(task);
1358 }
1359 d_drop(dentry);
1360 return 0;
1361 }
1362
1363 static struct dentry_operations tid_fd_dentry_operations =
1364 {
1365 .d_revalidate = tid_fd_revalidate,
1366 .d_delete = pid_delete_dentry,
1367 };
1368
1369 static struct dentry *proc_fd_instantiate(struct inode *dir,
1370 struct dentry *dentry, struct task_struct *task, const void *ptr)
1371 {
1372 unsigned fd = *(const unsigned *)ptr;
1373 struct file *file;
1374 struct files_struct *files;
1375 struct inode *inode;
1376 struct proc_inode *ei;
1377 struct dentry *error = ERR_PTR(-ENOENT);
1378
1379 inode = proc_pid_make_inode(dir->i_sb, task);
1380 if (!inode)
1381 goto out;
1382 ei = PROC_I(inode);
1383 ei->fd = fd;
1384 files = get_files_struct(task);
1385 if (!files)
1386 goto out_iput;
1387 inode->i_mode = S_IFLNK;
1388
1389 /*
1390 * We are not taking a ref to the file structure, so we must
1391 * hold ->file_lock.
1392 */
1393 spin_lock(&files->file_lock);
1394 file = fcheck_files(files, fd);
1395 if (!file)
1396 goto out_unlock;
1397 if (file->f_mode & 1)
1398 inode->i_mode |= S_IRUSR | S_IXUSR;
1399 if (file->f_mode & 2)
1400 inode->i_mode |= S_IWUSR | S_IXUSR;
1401 spin_unlock(&files->file_lock);
1402 put_files_struct(files);
1403
1404 inode->i_op = &proc_pid_link_inode_operations;
1405 inode->i_size = 64;
1406 ei->op.proc_get_link = proc_fd_link;
1407 dentry->d_op = &tid_fd_dentry_operations;
1408 d_add(dentry, inode);
1409 /* Close the race of the process dying before we return the dentry */
1410 if (tid_fd_revalidate(dentry, NULL))
1411 error = NULL;
1412
1413 out:
1414 return error;
1415 out_unlock:
1416 spin_unlock(&files->file_lock);
1417 put_files_struct(files);
1418 out_iput:
1419 iput(inode);
1420 goto out;
1421 }
1422
1423 static struct dentry *proc_lookupfd_common(struct inode *dir,
1424 struct dentry *dentry,
1425 instantiate_t instantiate)
1426 {
1427 struct task_struct *task = get_proc_task(dir);
1428 unsigned fd = name_to_int(dentry);
1429 struct dentry *result = ERR_PTR(-ENOENT);
1430
1431 if (!task)
1432 goto out_no_task;
1433 if (fd == ~0U)
1434 goto out;
1435
1436 result = instantiate(dir, dentry, task, &fd);
1437 out:
1438 put_task_struct(task);
1439 out_no_task:
1440 return result;
1441 }
1442
1443 static int proc_readfd_common(struct file * filp, void * dirent,
1444 filldir_t filldir, instantiate_t instantiate)
1445 {
1446 struct dentry *dentry = filp->f_path.dentry;
1447 struct inode *inode = dentry->d_inode;
1448 struct task_struct *p = get_proc_task(inode);
1449 unsigned int fd, tid, ino;
1450 int retval;
1451 struct files_struct * files;
1452 struct fdtable *fdt;
1453
1454 retval = -ENOENT;
1455 if (!p)
1456 goto out_no_task;
1457 retval = 0;
1458 tid = p->pid;
1459
1460 fd = filp->f_pos;
1461 switch (fd) {
1462 case 0:
1463 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
1464 goto out;
1465 filp->f_pos++;
1466 case 1:
1467 ino = parent_ino(dentry);
1468 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
1469 goto out;
1470 filp->f_pos++;
1471 default:
1472 files = get_files_struct(p);
1473 if (!files)
1474 goto out;
1475 rcu_read_lock();
1476 fdt = files_fdtable(files);
1477 for (fd = filp->f_pos-2;
1478 fd < fdt->max_fds;
1479 fd++, filp->f_pos++) {
1480 char name[PROC_NUMBUF];
1481 int len;
1482
1483 if (!fcheck_files(files, fd))
1484 continue;
1485 rcu_read_unlock();
1486
1487 len = snprintf(name, sizeof(name), "%d", fd);
1488 if (proc_fill_cache(filp, dirent, filldir,
1489 name, len, instantiate,
1490 p, &fd) < 0) {
1491 rcu_read_lock();
1492 break;
1493 }
1494 rcu_read_lock();
1495 }
1496 rcu_read_unlock();
1497 put_files_struct(files);
1498 }
1499 out:
1500 put_task_struct(p);
1501 out_no_task:
1502 return retval;
1503 }
1504
1505 static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
1506 struct nameidata *nd)
1507 {
1508 return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
1509 }
1510
1511 static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
1512 {
1513 return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
1514 }
1515
1516 static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
1517 size_t len, loff_t *ppos)
1518 {
1519 char tmp[PROC_FDINFO_MAX];
1520 int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, NULL, tmp);
1521 if (!err)
1522 err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
1523 return err;
1524 }
1525
1526 static const struct file_operations proc_fdinfo_file_operations = {
1527 .open = nonseekable_open,
1528 .read = proc_fdinfo_read,
1529 };
1530
1531 static const struct file_operations proc_fd_operations = {
1532 .read = generic_read_dir,
1533 .readdir = proc_readfd,
1534 };
1535
1536 /*
1537 * /proc/pid/fd needs a special permission handler so that a process can still
1538 * access /proc/self/fd after it has executed a setuid().
1539 */
1540 static int proc_fd_permission(struct inode *inode, int mask,
1541 struct nameidata *nd)
1542 {
1543 int rv;
1544
1545 rv = generic_permission(inode, mask, NULL);
1546 if (rv == 0)
1547 return 0;
1548 if (task_pid(current) == proc_pid(inode))
1549 rv = 0;
1550 return rv;
1551 }
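/*
 * Concretely: once a task has execed a setuid binary it is typically no
 * longer dumpable, so its fd directory is owned by root with mode 0500 and
 * generic_permission() fails even for the task itself; the
 * task_pid(current) == proc_pid(inode) check above is what keeps
 * /proc/self/fd usable for that task.
 */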
1552
1553 /*
1554 * proc directories can do almost nothing..
1555 */
1556 static const struct inode_operations proc_fd_inode_operations = {
1557 .lookup = proc_lookupfd,
1558 .permission = proc_fd_permission,
1559 .setattr = proc_setattr,
1560 };
1561
1562 static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
1563 struct dentry *dentry, struct task_struct *task, const void *ptr)
1564 {
1565 unsigned fd = *(unsigned *)ptr;
1566 struct inode *inode;
1567 struct proc_inode *ei;
1568 struct dentry *error = ERR_PTR(-ENOENT);
1569
1570 inode = proc_pid_make_inode(dir->i_sb, task);
1571 if (!inode)
1572 goto out;
1573 ei = PROC_I(inode);
1574 ei->fd = fd;
1575 inode->i_mode = S_IFREG | S_IRUSR;
1576 inode->i_fop = &proc_fdinfo_file_operations;
1577 dentry->d_op = &tid_fd_dentry_operations;
1578 d_add(dentry, inode);
1579 /* Close the race of the process dying before we return the dentry */
1580 if (tid_fd_revalidate(dentry, NULL))
1581 error = NULL;
1582
1583 out:
1584 return error;
1585 }
1586
1587 static struct dentry *proc_lookupfdinfo(struct inode *dir,
1588 struct dentry *dentry,
1589 struct nameidata *nd)
1590 {
1591 return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
1592 }
1593
1594 static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
1595 {
1596 return proc_readfd_common(filp, dirent, filldir,
1597 proc_fdinfo_instantiate);
1598 }
1599
1600 static const struct file_operations proc_fdinfo_operations = {
1601 .read = generic_read_dir,
1602 .readdir = proc_readfdinfo,
1603 };
1604
1605 /*
1606 * proc directories can do almost nothing..
1607 */
1608 static const struct inode_operations proc_fdinfo_inode_operations = {
1609 .lookup = proc_lookupfdinfo,
1610 .setattr = proc_setattr,
1611 };
1612
1613
1614 static struct dentry *proc_pident_instantiate(struct inode *dir,
1615 struct dentry *dentry, struct task_struct *task, const void *ptr)
1616 {
1617 const struct pid_entry *p = ptr;
1618 struct inode *inode;
1619 struct proc_inode *ei;
1620 struct dentry *error = ERR_PTR(-EINVAL);
1621
1622 inode = proc_pid_make_inode(dir->i_sb, task);
1623 if (!inode)
1624 goto out;
1625
1626 ei = PROC_I(inode);
1627 inode->i_mode = p->mode;
1628 if (S_ISDIR(inode->i_mode))
1629 inode->i_nlink = 2; /* Use getattr to fix if necessary */
1630 if (p->iop)
1631 inode->i_op = p->iop;
1632 if (p->fop)
1633 inode->i_fop = p->fop;
1634 ei->op = p->op;
1635 dentry->d_op = &pid_dentry_operations;
1636 d_add(dentry, inode);
1637 /* Close the race of the process dying before we return the dentry */
1638 if (pid_revalidate(dentry, NULL))
1639 error = NULL;
1640 out:
1641 return error;
1642 }
1643
1644 static struct dentry *proc_pident_lookup(struct inode *dir,
1645 struct dentry *dentry,
1646 const struct pid_entry *ents,
1647 unsigned int nents)
1648 {
1649 struct inode *inode;
1650 struct dentry *error;
1651 struct task_struct *task = get_proc_task(dir);
1652 const struct pid_entry *p, *last;
1653
1654 error = ERR_PTR(-ENOENT);
1655 inode = NULL;
1656
1657 if (!task)
1658 goto out_no_task;
1659
1660 /*
1661 * Yes, it does not scale. And it should not. Don't add
1662 * new entries into /proc/<tgid>/ without very good reasons.
1663 */
1664 last = &ents[nents - 1];
1665 for (p = ents; p <= last; p++) {
1666 if (p->len != dentry->d_name.len)
1667 continue;
1668 if (!memcmp(dentry->d_name.name, p->name, p->len))
1669 break;
1670 }
1671 if (p > last)
1672 goto out;
1673
1674 error = proc_pident_instantiate(dir, dentry, task, p);
1675 out:
1676 put_task_struct(task);
1677 out_no_task:
1678 return error;
1679 }
1680
1681 static int proc_pident_fill_cache(struct file *filp, void *dirent,
1682 filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
1683 {
1684 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1685 proc_pident_instantiate, task, p);
1686 }
1687
1688 static int proc_pident_readdir(struct file *filp,
1689 void *dirent, filldir_t filldir,
1690 const struct pid_entry *ents, unsigned int nents)
1691 {
1692 int i;
1693 int pid;
1694 struct dentry *dentry = filp->f_path.dentry;
1695 struct inode *inode = dentry->d_inode;
1696 struct task_struct *task = get_proc_task(inode);
1697 const struct pid_entry *p, *last;
1698 ino_t ino;
1699 int ret;
1700
1701 ret = -ENOENT;
1702 if (!task)
1703 goto out_no_task;
1704
1705 ret = 0;
1706 pid = task->pid;
1707 i = filp->f_pos;
1708 switch (i) {
1709 case 0:
1710 ino = inode->i_ino;
1711 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
1712 goto out;
1713 i++;
1714 filp->f_pos++;
1715 /* fall through */
1716 case 1:
1717 ino = parent_ino(dentry);
1718 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
1719 goto out;
1720 i++;
1721 filp->f_pos++;
1722 /* fall through */
1723 default:
1724 i -= 2;
1725 if (i >= nents) {
1726 ret = 1;
1727 goto out;
1728 }
1729 p = ents + i;
1730 last = &ents[nents - 1];
1731 while (p <= last) {
1732 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
1733 goto out;
1734 filp->f_pos++;
1735 p++;
1736 }
1737 }
1738
1739 ret = 1;
1740 out:
1741 put_task_struct(task);
1742 out_no_task:
1743 return ret;
1744 }
1745
1746 #ifdef CONFIG_SECURITY
1747 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
1748 size_t count, loff_t *ppos)
1749 {
1750 struct inode * inode = file->f_path.dentry->d_inode;
1751 char *p = NULL;
1752 ssize_t length;
1753 struct task_struct *task = get_proc_task(inode);
1754
1755 if (!task)
1756 return -ESRCH;
1757
1758 length = security_getprocattr(task,
1759 (char*)file->f_path.dentry->d_name.name,
1760 &p);
1761 put_task_struct(task);
1762 if (length > 0)
1763 length = simple_read_from_buffer(buf, count, ppos, p, length);
1764 kfree(p);
1765 return length;
1766 }
1767
1768 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
1769 size_t count, loff_t *ppos)
1770 {
1771 struct inode * inode = file->f_path.dentry->d_inode;
1772 char *page;
1773 ssize_t length;
1774 struct task_struct *task = get_proc_task(inode);
1775
1776 length = -ESRCH;
1777 if (!task)
1778 goto out_no_task;
1779 if (count > PAGE_SIZE)
1780 count = PAGE_SIZE;
1781
1782 /* No partial writes. */
1783 length = -EINVAL;
1784 if (*ppos != 0)
1785 goto out;
1786
1787 length = -ENOMEM;
1788 page = (char*)__get_free_page(GFP_TEMPORARY);
1789 if (!page)
1790 goto out;
1791
1792 length = -EFAULT;
1793 if (copy_from_user(page, buf, count))
1794 goto out_free;
1795
1796 length = security_setprocattr(task,
1797 (char*)file->f_path.dentry->d_name.name,
1798 (void*)page, count);
1799 out_free:
1800 free_page((unsigned long) page);
1801 out:
1802 put_task_struct(task);
1803 out_no_task:
1804 return length;
1805 }
1806
1807 static const struct file_operations proc_pid_attr_operations = {
1808 .read = proc_pid_attr_read,
1809 .write = proc_pid_attr_write,
1810 };
1811
1812 static const struct pid_entry attr_dir_stuff[] = {
1813 REG("current", S_IRUGO|S_IWUGO, pid_attr),
1814 REG("prev", S_IRUGO, pid_attr),
1815 REG("exec", S_IRUGO|S_IWUGO, pid_attr),
1816 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr),
1817 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr),
1818 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr),
1819 };
1820
1821 static int proc_attr_dir_readdir(struct file * filp,
1822 void * dirent, filldir_t filldir)
1823 {
1824 return proc_pident_readdir(filp,dirent,filldir,
1825 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
1826 }
1827
1828 static const struct file_operations proc_attr_dir_operations = {
1829 .read = generic_read_dir,
1830 .readdir = proc_attr_dir_readdir,
1831 };
1832
1833 static struct dentry *proc_attr_dir_lookup(struct inode *dir,
1834 struct dentry *dentry, struct nameidata *nd)
1835 {
1836 return proc_pident_lookup(dir, dentry,
1837 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
1838 }
1839
1840 static const struct inode_operations proc_attr_dir_inode_operations = {
1841 .lookup = proc_attr_dir_lookup,
1842 .getattr = pid_getattr,
1843 .setattr = proc_setattr,
1844 };
1845
1846 #endif
1847
1848 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1849 static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
1850 size_t count, loff_t *ppos)
1851 {
1852 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
1853 struct mm_struct *mm;
1854 char buffer[PROC_NUMBUF];
1855 size_t len;
1856 int ret;
1857
1858 if (!task)
1859 return -ESRCH;
1860
1861 ret = 0;
1862 mm = get_task_mm(task);
1863 if (mm) {
1864 len = snprintf(buffer, sizeof(buffer), "%08lx\n",
1865 ((mm->flags & MMF_DUMP_FILTER_MASK) >>
1866 MMF_DUMP_FILTER_SHIFT));
1867 mmput(mm);
1868 ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
1869 }
1870
1871 put_task_struct(task);
1872
1873 return ret;
1874 }
1875
1876 static ssize_t proc_coredump_filter_write(struct file *file,
1877 const char __user *buf,
1878 size_t count,
1879 loff_t *ppos)
1880 {
1881 struct task_struct *task;
1882 struct mm_struct *mm;
1883 char buffer[PROC_NUMBUF], *end;
1884 unsigned int val;
1885 int ret;
1886 int i;
1887 unsigned long mask;
1888
1889 ret = -EFAULT;
1890 memset(buffer, 0, sizeof(buffer));
1891 if (count > sizeof(buffer) - 1)
1892 count = sizeof(buffer) - 1;
1893 if (copy_from_user(buffer, buf, count))
1894 goto out_no_task;
1895
1896 ret = -EINVAL;
1897 val = (unsigned int)simple_strtoul(buffer, &end, 0);
1898 if (*end == '\n')
1899 end++;
1900 if (end - buffer == 0)
1901 goto out_no_task;
1902
1903 ret = -ESRCH;
1904 task = get_proc_task(file->f_dentry->d_inode);
1905 if (!task)
1906 goto out_no_task;
1907
1908 ret = end - buffer;
1909 mm = get_task_mm(task);
1910 if (!mm)
1911 goto out_no_mm;
1912
1913 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
1914 if (val & mask)
1915 set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
1916 else
1917 clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
1918 }
1919
1920 mmput(mm);
1921 out_no_mm:
1922 put_task_struct(task);
1923 out_no_task:
1924 return ret;
1925 }
1926
1927 static const struct file_operations proc_coredump_filter_operations = {
1928 .read = proc_coredump_filter_read,
1929 .write = proc_coredump_filter_write,
1930 };
1931 #endif
1932
1933 /*
1934 * /proc/self:
1935 */
1936 static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
1937 int buflen)
1938 {
1939 char tmp[PROC_NUMBUF];
1940 sprintf(tmp, "%d", current->tgid);
1941 return vfs_readlink(dentry,buffer,buflen,tmp);
1942 }
1943
1944 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
1945 {
1946 char tmp[PROC_NUMBUF];
1947 sprintf(tmp, "%d", current->tgid);
1948 return ERR_PTR(vfs_follow_link(nd,tmp));
1949 }
1950
1951 static const struct inode_operations proc_self_inode_operations = {
1952 .readlink = proc_self_readlink,
1953 .follow_link = proc_self_follow_link,
1954 };
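/*
 * A minimal user-space sketch of what the link resolves to (illustrative):
 *
 *	char buf[PROC_NUMBUF];
 *	ssize_t n = readlink("/proc/self", buf, sizeof(buf) - 1);
 *	if (n > 0)
 *		buf[n] = '\0';	now e.g. "1234", the caller's tgid in decimal
 */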
1955
1956 /*
1957 * proc base
1958 *
1959 * These are the directory entries in the root directory of /proc
1960 * that properly belong to the /proc filesystem, as they describe
1961 * something that is process related.
1962 */
1963 static const struct pid_entry proc_base_stuff[] = {
1964 NOD("self", S_IFLNK|S_IRWXUGO,
1965 &proc_self_inode_operations, NULL, {}),
1966 };
1967
1968 /*
1969 * Exceptional case: normally we are not allowed to unhash a busy
1970 * directory. In this case, however, we can do it - no aliasing problems
1971 * due to the way we treat inodes.
1972 */
1973 static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
1974 {
1975 struct inode *inode = dentry->d_inode;
1976 struct task_struct *task = get_proc_task(inode);
1977 if (task) {
1978 put_task_struct(task);
1979 return 1;
1980 }
1981 d_drop(dentry);
1982 return 0;
1983 }
1984
1985 static struct dentry_operations proc_base_dentry_operations =
1986 {
1987 .d_revalidate = proc_base_revalidate,
1988 .d_delete = pid_delete_dentry,
1989 };
1990
1991 static struct dentry *proc_base_instantiate(struct inode *dir,
1992 struct dentry *dentry, struct task_struct *task, const void *ptr)
1993 {
1994 const struct pid_entry *p = ptr;
1995 struct inode *inode;
1996 struct proc_inode *ei;
1997 struct dentry *error = ERR_PTR(-EINVAL);
1998
1999 /* Allocate the inode */
2000 error = ERR_PTR(-ENOMEM);
2001 inode = new_inode(dir->i_sb);
2002 if (!inode)
2003 goto out;
2004
2005 /* Initialize the inode */
2006 ei = PROC_I(inode);
2007 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
2008
2009 /*
2010 * grab the reference to the task.
2011 */
2012 ei->pid = get_task_pid(task, PIDTYPE_PID);
2013 if (!ei->pid)
2014 goto out_iput;
2015
2016 inode->i_uid = 0;
2017 inode->i_gid = 0;
2018 inode->i_mode = p->mode;
2019 if (S_ISDIR(inode->i_mode))
2020 inode->i_nlink = 2;
2021 if (S_ISLNK(inode->i_mode))
2022 inode->i_size = 64;
2023 if (p->iop)
2024 inode->i_op = p->iop;
2025 if (p->fop)
2026 inode->i_fop = p->fop;
2027 ei->op = p->op;
2028 dentry->d_op = &proc_base_dentry_operations;
2029 d_add(dentry, inode);
2030 error = NULL;
2031 out:
2032 return error;
2033 out_iput:
2034 iput(inode);
2035 goto out;
2036 }
2037
2038 static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
2039 {
2040 struct dentry *error;
2041 struct task_struct *task = get_proc_task(dir);
2042 const struct pid_entry *p, *last;
2043
2044 error = ERR_PTR(-ENOENT);
2045
2046 if (!task)
2047 goto out_no_task;
2048
2049 /* Lookup the directory entry */
2050 last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
2051 for (p = proc_base_stuff; p <= last; p++) {
2052 if (p->len != dentry->d_name.len)
2053 continue;
2054 if (!memcmp(dentry->d_name.name, p->name, p->len))
2055 break;
2056 }
2057 if (p > last)
2058 goto out;
2059
2060 error = proc_base_instantiate(dir, dentry, task, p);
2061
2062 out:
2063 put_task_struct(task);
2064 out_no_task:
2065 return error;
2066 }
2067
2068 static int proc_base_fill_cache(struct file *filp, void *dirent,
2069 filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
2070 {
2071 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
2072 proc_base_instantiate, task, p);
2073 }
2074
2075 #ifdef CONFIG_TASK_IO_ACCOUNTING
2076 static int proc_pid_io_accounting(struct task_struct *task, char *buffer)
2077 {
2078 return sprintf(buffer,
2079 #ifdef CONFIG_TASK_XACCT
2080 "rchar: %llu\n"
2081 "wchar: %llu\n"
2082 "syscr: %llu\n"
2083 "syscw: %llu\n"
2084 #endif
2085 "read_bytes: %llu\n"
2086 "write_bytes: %llu\n"
2087 "cancelled_write_bytes: %llu\n",
2088 #ifdef CONFIG_TASK_XACCT
2089 (unsigned long long)task->rchar,
2090 (unsigned long long)task->wchar,
2091 (unsigned long long)task->syscr,
2092 (unsigned long long)task->syscw,
2093 #endif
2094 (unsigned long long)task->ioac.read_bytes,
2095 (unsigned long long)task->ioac.write_bytes,
2096 (unsigned long long)task->ioac.cancelled_write_bytes);
2097 }
2098 #endif
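/*
 * The resulting /proc/<pid>/io file looks like (values purely illustrative):
 *
 *	rchar: 323934931
 *	wchar: 323929600
 *	syscr: 632687
 *	syscw: 632675
 *	read_bytes: 0
 *	write_bytes: 323932160
 *	cancelled_write_bytes: 0
 *
 * with the first four lines present only under CONFIG_TASK_XACCT.
 */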
2099
2100 /*
2101 * Thread groups
2102 */
2103 static const struct file_operations proc_task_operations;
2104 static const struct inode_operations proc_task_inode_operations;
2105
2106 static const struct pid_entry tgid_base_stuff[] = {
2107 DIR("task", S_IRUGO|S_IXUGO, task),
2108 DIR("fd", S_IRUSR|S_IXUSR, fd),
2109 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
2110 REG("environ", S_IRUSR, environ),
2111 INF("auxv", S_IRUSR, pid_auxv),
2112 INF("status", S_IRUGO, pid_status),
2113 #ifdef CONFIG_SCHED_DEBUG
2114 REG("sched", S_IRUGO|S_IWUSR, pid_sched),
2115 #endif
2116 INF("cmdline", S_IRUGO, pid_cmdline),
2117 INF("stat", S_IRUGO, tgid_stat),
2118 INF("statm", S_IRUGO, pid_statm),
2119 REG("maps", S_IRUGO, maps),
2120 #ifdef CONFIG_NUMA
2121 REG("numa_maps", S_IRUGO, numa_maps),
2122 #endif
2123 REG("mem", S_IRUSR|S_IWUSR, mem),
2124 LNK("cwd", cwd),
2125 LNK("root", root),
2126 LNK("exe", exe),
2127 REG("mounts", S_IRUGO, mounts),
2128 REG("mountstats", S_IRUSR, mountstats),
2129 #ifdef CONFIG_MMU
2130 REG("clear_refs", S_IWUSR, clear_refs),
2131 REG("smaps", S_IRUGO, smaps),
2132 #endif
2133 #ifdef CONFIG_SECURITY
2134 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
2135 #endif
2136 #ifdef CONFIG_KALLSYMS
2137 INF("wchan", S_IRUGO, pid_wchan),
2138 #endif
2139 #ifdef CONFIG_SCHEDSTATS
2140 INF("schedstat", S_IRUGO, pid_schedstat),
2141 #endif
2142 #ifdef CONFIG_PROC_PID_CPUSET
2143 REG("cpuset", S_IRUGO, cpuset),
2144 #endif
2145 #ifdef CONFIG_CGROUPS
2146 REG("cgroup", S_IRUGO, cgroup),
2147 #endif
2148 INF("oom_score", S_IRUGO, oom_score),
2149 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
2150 #ifdef CONFIG_AUDITSYSCALL
2151 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
2152 #endif
2153 #ifdef CONFIG_FAULT_INJECTION
2154 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
2155 #endif
2156 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
2157 REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
2158 #endif
2159 #ifdef CONFIG_TASK_IO_ACCOUNTING
2160 INF("io", S_IRUGO, pid_io_accounting),
2161 #endif
2162 };
2163
2164 static int proc_tgid_base_readdir(struct file * filp,
2165 void * dirent, filldir_t filldir)
2166 {
2167 return proc_pident_readdir(filp,dirent,filldir,
2168 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
2169 }
2170
2171 static const struct file_operations proc_tgid_base_operations = {
2172 .read = generic_read_dir,
2173 .readdir = proc_tgid_base_readdir,
2174 };
2175
2176 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
2177 return proc_pident_lookup(dir, dentry,
2178 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
2179 }
2180
2181 static const struct inode_operations proc_tgid_base_inode_operations = {
2182 .lookup = proc_tgid_base_lookup,
2183 .getattr = pid_getattr,
2184 .setattr = proc_setattr,
2185 };
2186
2187 /**
2188 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
2189 *
2190 * @task: task that should be flushed.
2191 *
2192 * Looks in the dcache for
2193 * /proc/@pid
2194 * /proc/@tgid/task/@pid
2195 * if either directory is present, flushes it and all of its children
2196 * from the dcache.
2197 *
2198 * It is safe and reasonable to cache /proc entries for a task until
2199 * that task exits. After that they just clog up the dcache with
2200 * useless entries, possibly causing useful dcache entries to be
2201 * flushed instead. This routine is provided to flush those useless
2202 * dcache entries at process exit time.
2203 *
2204 * NOTE: This routine is just an optimization; it does not guarantee
2205 * that no dcache entries will exist at process exit time, it
2206 * just makes it very unlikely that any will persist.
2207 */
2208 static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
2209 {
2210 struct dentry *dentry, *leader, *dir;
2211 char buf[PROC_NUMBUF];
2212 struct qstr name;
2213
2214 name.name = buf;
2215 name.len = snprintf(buf, sizeof(buf), "%d", pid);
2216 dentry = d_hash_and_lookup(mnt->mnt_root, &name);
2217 if (dentry) {
2218 shrink_dcache_parent(dentry);
2219 d_drop(dentry);
2220 dput(dentry);
2221 }
2222
2223 if (tgid == 0)
2224 goto out;
2225
2226 name.name = buf;
2227 name.len = snprintf(buf, sizeof(buf), "%d", tgid);
2228 leader = d_hash_and_lookup(mnt->mnt_root, &name);
2229 if (!leader)
2230 goto out;
2231
2232 name.name = "task";
2233 name.len = strlen(name.name);
2234 dir = d_hash_and_lookup(leader, &name);
2235 if (!dir)
2236 goto out_put_leader;
2237
2238 name.name = buf;
2239 name.len = snprintf(buf, sizeof(buf), "%d", pid);
2240 dentry = d_hash_and_lookup(dir, &name);
2241 if (dentry) {
2242 shrink_dcache_parent(dentry);
2243 d_drop(dentry);
2244 dput(dentry);
2245 }
2246
2247 dput(dir);
2248 out_put_leader:
2249 dput(leader);
2250 out:
2251 return;
2252 }
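/*
 * Worked example (pids are made up): for a non-leader thread with
 * pid 4101 in thread group 4100, proc_flush_task_mnt(mnt, 4101, 4100)
 * drops "/proc/4101" and "/proc/4100/task/4101" from @mnt's dcache.
 * For a thread group leader the caller passes tgid == 0, so only
 * "/proc/<pid>" is flushed.
 */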
2253
2254 /*
2255 * When flushing dentries from proc, one needs to flush them from the global
2256 * proc (proc_mnt) and from all the namespaces' procs this task was seen
2257 * in. This function is supposed to do all of that work.
2258 */
2259
2260 void proc_flush_task(struct task_struct *task)
2261 {
2262 proc_flush_task_mnt(proc_mnt, task->pid,
2263 thread_group_leader(task) ? 0 : task->tgid);
2264 }
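/*
 * Note: proc_flush_task() runs when a task is reaped (release_task()).
 * With pid namespaces a task can be visible in several procfs
 * instances, each under a different pid number; the code above still
 * only flushes the global proc_mnt, which is the remaining work the
 * comment above alludes to.
 */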
2265
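/*
 * Build the /proc/<tgid> directory inode itself.  The hard-coded link
 * count of 5 presumably corresponds to ".", ".." and the three DIR
 * entries in tgid_base_stuff ("task", "fd" and "fdinfo"), with one more
 * for "attr" when CONFIG_SECURITY is enabled.
 */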
2266 static struct dentry *proc_pid_instantiate(struct inode *dir,
2267 struct dentry * dentry,
2268 struct task_struct *task, const void *ptr)
2269 {
2270 struct dentry *error = ERR_PTR(-ENOENT);
2271 struct inode *inode;
2272
2273 inode = proc_pid_make_inode(dir->i_sb, task);
2274 if (!inode)
2275 goto out;
2276
2277 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2278 inode->i_op = &proc_tgid_base_inode_operations;
2279 inode->i_fop = &proc_tgid_base_operations;
2280 inode->i_flags|=S_IMMUTABLE;
2281 inode->i_nlink = 5;
2282 #ifdef CONFIG_SECURITY
2283 inode->i_nlink += 1;
2284 #endif
2285
2286 dentry->d_op = &pid_dentry_operations;
2287
2288 d_add(dentry, inode);
2289 /* Close the race of the process dying before we return the dentry */
2290 if (pid_revalidate(dentry, NULL))
2291 error = NULL;
2292 out:
2293 return error;
2294 }
2295
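/*
 * Lookup of a name directly under /proc: first try the non-pid base
 * entries via proc_base_lookup(); only if that reports -ENOENT is the
 * name parsed as a decimal tgid and the matching task's directory
 * instantiated.
 */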
2296 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2297 {
2298 struct dentry *result = ERR_PTR(-ENOENT);
2299 struct task_struct *task;
2300 unsigned tgid;
2301
2302 result = proc_base_lookup(dir, dentry);
2303 if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
2304 goto out;
2305
2306 tgid = name_to_int(dentry);
2307 if (tgid == ~0U)
2308 goto out;
2309
2310 rcu_read_lock();
2311 task = find_task_by_pid(tgid);
2312 if (task)
2313 get_task_struct(task);
2314 rcu_read_unlock();
2315 if (!task)
2316 goto out;
2317
2318 result = proc_pid_instantiate(dir, dentry, task, NULL);
2319 put_task_struct(task);
2320 out:
2321 return result;
2322 }
2323
2324 /*
2325 * Find the first task with tgid >= tgid
2326 *
2327 */
2328 static struct task_struct *next_tgid(unsigned int tgid)
2329 {
2330 struct task_struct *task;
2331 struct pid *pid;
2332
2333 rcu_read_lock();
2334 retry:
2335 task = NULL;
2336 pid = find_ge_pid(tgid, &init_pid_ns);
2337 if (pid) {
2338 tgid = pid->nr + 1;
2339 task = pid_task(pid, PIDTYPE_PID);
2340 /* What we want to know is whether the pid we have found is the
2341 * pid of a thread_group_leader. Testing whether the task is a
2342 * thread_group_leader is the obvious thing to do, but there is
2343 * a window where it fails, due to the pid transfer logic in
2344 * de_thread.
2345 *
2346 * So we perform the straightforward test of checking whether
2347 * the pid we have found is the pid of a thread group leader,
2348 * and don't worry if the task we have found doesn't happen to
2349 * be a thread group leader, as we don't care in the case of
2350 * readdir.
2351 */
2352 if (!task || !has_group_leader_pid(task))
2353 goto retry;
2354 get_task_struct(task);
2355 }
2356 rcu_read_unlock();
2357 return task;
2358 }
2359
2360 #define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
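/*
 * f_pos layout used by proc_pid_readdir() below: offsets below
 * FIRST_PROCESS_ENTRY are the non-process /proc entries handled
 * elsewhere, the next ARRAY_SIZE(proc_base_stuff) slots are filled via
 * proc_base_fill_cache(), and every offset at or beyond TGID_OFFSET
 * encodes a tgid as (f_pos - TGID_OFFSET).
 */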
2361
2362 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2363 struct task_struct *task, int tgid)
2364 {
2365 char name[PROC_NUMBUF];
2366 int len = snprintf(name, sizeof(name), "%d", tgid);
2367 return proc_fill_cache(filp, dirent, filldir, name, len,
2368 proc_pid_instantiate, task, NULL);
2369 }
2370
2371 /* for the /proc/ directory itself, after non-process stuff has been done */
2372 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
2373 {
2374 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
2375 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
2376 struct task_struct *task;
2377 int tgid;
2378
2379 if (!reaper)
2380 goto out_no_task;
2381
2382 for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
2383 const struct pid_entry *p = &proc_base_stuff[nr];
2384 if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
2385 goto out;
2386 }
2387
2388 tgid = filp->f_pos - TGID_OFFSET;
2389 for (task = next_tgid(tgid);
2390 task;
2391 put_task_struct(task), task = next_tgid(tgid + 1)) {
2392 tgid = task->pid;
2393 filp->f_pos = tgid + TGID_OFFSET;
2394 if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
2395 put_task_struct(task);
2396 goto out;
2397 }
2398 }
2399 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
2400 out:
2401 put_task_struct(reaper);
2402 out_no_task:
2403 return 0;
2404 }
2405
2406 /*
2407 * Tasks
2408 */
2409 static const struct pid_entry tid_base_stuff[] = {
2410 DIR("fd", S_IRUSR|S_IXUSR, fd),
2411 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
2412 REG("environ", S_IRUSR, environ),
2413 INF("auxv", S_IRUSR, pid_auxv),
2414 INF("status", S_IRUGO, pid_status),
2415 #ifdef CONFIG_SCHED_DEBUG
2416 REG("sched", S_IRUGO|S_IWUSR, pid_sched),
2417 #endif
2418 INF("cmdline", S_IRUGO, pid_cmdline),
2419 INF("stat", S_IRUGO, tid_stat),
2420 INF("statm", S_IRUGO, pid_statm),
2421 REG("maps", S_IRUGO, maps),
2422 #ifdef CONFIG_NUMA
2423 REG("numa_maps", S_IRUGO, numa_maps),
2424 #endif
2425 REG("mem", S_IRUSR|S_IWUSR, mem),
2426 LNK("cwd", cwd),
2427 LNK("root", root),
2428 LNK("exe", exe),
2429 REG("mounts", S_IRUGO, mounts),
2430 #ifdef CONFIG_MMU
2431 REG("clear_refs", S_IWUSR, clear_refs),
2432 REG("smaps", S_IRUGO, smaps),
2433 #endif
2434 #ifdef CONFIG_SECURITY
2435 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
2436 #endif
2437 #ifdef CONFIG_KALLSYMS
2438 INF("wchan", S_IRUGO, pid_wchan),
2439 #endif
2440 #ifdef CONFIG_SCHEDSTATS
2441 INF("schedstat", S_IRUGO, pid_schedstat),
2442 #endif
2443 #ifdef CONFIG_PROC_PID_CPUSET
2444 REG("cpuset", S_IRUGO, cpuset),
2445 #endif
2446 #ifdef CONFIG_CGROUPS
2447 REG("cgroup", S_IRUGO, cgroup),
2448 #endif
2449 INF("oom_score", S_IRUGO, oom_score),
2450 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
2451 #ifdef CONFIG_AUDITSYSCALL
2452 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
2453 #endif
2454 #ifdef CONFIG_FAULT_INJECTION
2455 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
2456 #endif
2457 };
2458
2459 static int proc_tid_base_readdir(struct file * filp,
2460 void * dirent, filldir_t filldir)
2461 {
2462 return proc_pident_readdir(filp,dirent,filldir,
2463 tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
2464 }
2465
2466 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
2467 return proc_pident_lookup(dir, dentry,
2468 tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
2469 }
2470
2471 static const struct file_operations proc_tid_base_operations = {
2472 .read = generic_read_dir,
2473 .readdir = proc_tid_base_readdir,
2474 };
2475
2476 static const struct inode_operations proc_tid_base_inode_operations = {
2477 .lookup = proc_tid_base_lookup,
2478 .getattr = pid_getattr,
2479 .setattr = proc_setattr,
2480 };
2481
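/*
 * Build the /proc/<tgid>/task/<tid> directory inode.  As with
 * proc_pid_instantiate() above, the hard-coded link count (4 here,
 * plus one for "attr" under CONFIG_SECURITY) mirrors the DIR entries
 * in tid_base_stuff ("fd" and "fdinfo").
 */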
2482 static struct dentry *proc_task_instantiate(struct inode *dir,
2483 struct dentry *dentry, struct task_struct *task, const void *ptr)
2484 {
2485 struct dentry *error = ERR_PTR(-ENOENT);
2486 struct inode *inode;
2487 inode = proc_pid_make_inode(dir->i_sb, task);
2488
2489 if (!inode)
2490 goto out;
2491 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2492 inode->i_op = &proc_tid_base_inode_operations;
2493 inode->i_fop = &proc_tid_base_operations;
2494 inode->i_flags|=S_IMMUTABLE;
2495 inode->i_nlink = 4;
2496 #ifdef CONFIG_SECURITY
2497 inode->i_nlink += 1;
2498 #endif
2499
2500 dentry->d_op = &pid_dentry_operations;
2501
2502 d_add(dentry, inode);
2503 /* Close the race of the process dying before we return the dentry */
2504 if (pid_revalidate(dentry, NULL))
2505 error = NULL;
2506 out:
2507 return error;
2508 }
2509
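/*
 * Lookup of a numeric tid under /proc/<tgid>/task/.  The found task is
 * only exposed if it actually belongs to the same thread group as the
 * directory's leader.
 */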
2510 static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2511 {
2512 struct dentry *result = ERR_PTR(-ENOENT);
2513 struct task_struct *task;
2514 struct task_struct *leader = get_proc_task(dir);
2515 unsigned tid;
2516
2517 if (!leader)
2518 goto out_no_task;
2519
2520 tid = name_to_int(dentry);
2521 if (tid == ~0U)
2522 goto out;
2523
2524 rcu_read_lock();
2525 task = find_task_by_pid(tid);
2526 if (task)
2527 get_task_struct(task);
2528 rcu_read_unlock();
2529 if (!task)
2530 goto out;
2531 if (leader->tgid != task->tgid)
2532 goto out_drop_task;
2533
2534 result = proc_task_instantiate(dir, dentry, task, NULL);
2535 out_drop_task:
2536 put_task_struct(task);
2537 out:
2538 put_task_struct(leader);
2539 out_no_task:
2540 return result;
2541 }
2542
2543 /*
2544 * Find the first tid of a thread group to return to user space.
2545 *
2546 * Usually this is just the thread group leader, but if the user's
2547 * buffer was too small or there was a seek into the middle of the
2548 * directory we have more work to do.
2549 *
2550 * In the case of a short read we start with find_task_by_pid.
2551 *
2552 * In the case of a seek we start with the leader and walk nr
2553 * threads past it.
2554 */
2555 static struct task_struct *first_tid(struct task_struct *leader,
2556 int tid, int nr)
2557 {
2558 struct task_struct *pos;
2559
2560 rcu_read_lock();
2561 /* Attempt to start with the pid of a thread */
2562 if (tid && (nr > 0)) {
2563 pos = find_task_by_pid(tid);
2564 if (pos && (pos->group_leader == leader))
2565 goto found;
2566 }
2567
2568 /* If nr exceeds the number of threads there is nothing to do */
2569 pos = NULL;
2570 if (nr && nr >= get_nr_threads(leader))
2571 goto out;
2572
2573 /* If we haven't found our starting place yet, start
2574 * with the leader and walk nr threads forward.
2575 */
2576 for (pos = leader; nr > 0; --nr) {
2577 pos = next_thread(pos);
2578 if (pos == leader) {
2579 pos = NULL;
2580 goto out;
2581 }
2582 }
2583 found:
2584 get_task_struct(pos);
2585 out:
2586 rcu_read_unlock();
2587 return pos;
2588 }
2589
2590 /*
2591 * Find the next thread in the thread list.
2592 * Return NULL if there is an error or no next thread.
2593 *
2594 * The reference to the input task_struct is released.
2595 */
2596 static struct task_struct *next_tid(struct task_struct *start)
2597 {
2598 struct task_struct *pos = NULL;
2599 rcu_read_lock();
2600 if (pid_alive(start)) {
2601 pos = next_thread(start);
2602 if (thread_group_leader(pos))
2603 pos = NULL;
2604 else
2605 get_task_struct(pos);
2606 }
2607 rcu_read_unlock();
2608 put_task_struct(start);
2609 return pos;
2610 }
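/*
 * first_tid() and next_tid() are meant to be used as a pair, roughly
 * like this sketch of the loop in proc_task_readdir() below:
 *
 *	for (task = first_tid(leader, tid, pos - 2); task;
 *	     task = next_tid(task), pos++)
 *		...emit task->pid...
 *
 * first_tid() returns with a reference held on the task and next_tid()
 * drops the reference on its argument, so the caller only needs an
 * explicit put_task_struct() when breaking out of the loop early.
 */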
2611
2612 static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2613 struct task_struct *task, int tid)
2614 {
2615 char name[PROC_NUMBUF];
2616 int len = snprintf(name, sizeof(name), "%d", tid);
2617 return proc_fill_cache(filp, dirent, filldir, name, len,
2618 proc_task_instantiate, task, NULL);
2619 }
2620
2621 /* for the /proc/TGID/task/ directories */
2622 static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
2623 {
2624 struct dentry *dentry = filp->f_path.dentry;
2625 struct inode *inode = dentry->d_inode;
2626 struct task_struct *leader = NULL;
2627 struct task_struct *task;
2628 int retval = -ENOENT;
2629 ino_t ino;
2630 int tid;
2631 unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
2632
2633 task = get_proc_task(inode);
2634 if (!task)
2635 goto out_no_task;
2636 rcu_read_lock();
2637 if (pid_alive(task)) {
2638 leader = task->group_leader;
2639 get_task_struct(leader);
2640 }
2641 rcu_read_unlock();
2642 put_task_struct(task);
2643 if (!leader)
2644 goto out_no_task;
2645 retval = 0;
2646
2647 switch (pos) {
2648 case 0:
2649 ino = inode->i_ino;
2650 if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
2651 goto out;
2652 pos++;
2653 /* fall through */
2654 case 1:
2655 ino = parent_ino(dentry);
2656 if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
2657 goto out;
2658 pos++;
2659 /* fall through */
2660 }
2661
2662 /* f_version caches the tid value that the last readdir call couldn't
2663 * return. lseek aka telldir automagically resets f_version to 0.
2664 */
2665 tid = (int)filp->f_version;
2666 filp->f_version = 0;
2667 for (task = first_tid(leader, tid, pos - 2);
2668 task;
2669 task = next_tid(task), pos++) {
2670 tid = task->pid;
2671 if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
2672 /* returning this tid failed, save it as the first
2673 * tid for the next readdir call */
2674 filp->f_version = (u64)tid;
2675 put_task_struct(task);
2676 break;
2677 }
2678 }
2679 out:
2680 filp->f_pos = pos;
2681 put_task_struct(leader);
2682 out_no_task:
2683 return retval;
2684 }
2685
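/*
 * stat() on /proc/<tgid>/task should report one link per live thread
 * on top of the base directory link count, so the thread count is
 * added here under rcu_read_lock().
 */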
2686 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
2687 {
2688 struct inode *inode = dentry->d_inode;
2689 struct task_struct *p = get_proc_task(inode);
2690 generic_fillattr(inode, stat);
2691
2692 if (p) {
2693 rcu_read_lock();
2694 stat->nlink += get_nr_threads(p);
2695 rcu_read_unlock();
2696 put_task_struct(p);
2697 }
2698
2699 return 0;
2700 }
2701
2702 static const struct inode_operations proc_task_inode_operations = {
2703 .lookup = proc_task_lookup,
2704 .getattr = proc_task_getattr,
2705 .setattr = proc_setattr,
2706 };
2707
2708 static const struct file_operations proc_task_operations = {
2709 .read = generic_read_dir,
2710 .readdir = proc_task_readdir,
2711 };