/*
 *  linux/fs/proc/array.c
 *
 *  Copyright (C) 1992  by Linus Torvalds
 *  based on ideas by Darren Senn
 *
 * Fixes:
 * Michael. K. Johnson   :  stat,statm extensions.
 *                          <johnsonm@stolaf.edu>
 *
 * Pauline Middelink     :  Made cmdline,envline only break at '\0's, to
 *                          make sure SET_PROCTITLE works. Also removed
 *                          bad '!' which forced address recalculation for
 *                          EVERY character on the current page.
 *                          <middelin@polyware.iaf.nl>
 *
 * Danny ter Haar        :  added cpuinfo
 *                          <dth@cistron.nl>
 *
 * Alessandro Rubini     :  profile extension.
 *                          <rubini@ipvvis.unipv.it>
 *
 * Jeff Tranter          :  added BogoMips field to cpuinfo
 *                          <Jeff_Tranter@Mitel.COM>
 *
 * Bruno Haible          :  remove 4K limit for the maps file
 *                          <haible@ma2s2.mathematik.uni-karlsruhe.de>
 *
 * Yves Arrouye          :  remove removal of trailing spaces in get_array.
 *                          <Yves.Arrouye@marin.fdn.fr>
 *
 * Jerome Forissier      :  added per-CPU time information to /proc/stat
 *                          and /proc/<pid>/cpu extension
 *                          <forissier@isia.cma.fr>
 *                          - Incorporation and non-SMP safe operation
 *                          of forissier patch in 2.1.78 by
 *                          Hans Marcus <crowbar@concepts.nl>
 *
 * aeb@cwi.nl            :  /proc/partitions
 *
 *
 * Alan Cox              :  security fixes.
 *                          <alan@lxorguk.ukuu.org.uk>
 *
 * Al Viro               :  safe handling of mm_struct
 *
 * Gerhard Wichert       :  added BIGMEM support
 * Siemens AG               <Gerhard.Wichert@pdb.siemens.de>
 *
 * Al Viro & Jeff Garzik :  moved most of the thing into base.c and
 *                       :  proc_misc.c. The rest may eventually go into
 *                       :  base.c too.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/times.h>
#include <linux/cpuset.h>
#include <linux/rcupdate.h>
#include <linux/delayacct.h>
#include <linux/seq_file.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user_namespace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include "internal.h"

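/*
 * Print the task's command name for the "Name:" line of /proc/<pid>/status,
 * escaping backslashes and newlines so the field stays on a single line.
 */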
static inline void task_name(struct seq_file *m, struct task_struct *p)
{
        int i;
        char *buf, *end;
        char *name;
        char tcomm[sizeof(p->comm)];

        get_task_comm(tcomm, p);

        seq_puts(m, "Name:\t");
        end = m->buf + m->size;
        buf = m->buf + m->count;
        name = tcomm;
        i = sizeof(tcomm);
        while (i && (buf < end)) {
                unsigned char c = *name;
                name++;
                i--;
                *buf = c;
                if (!c)
                        break;
                if (c == '\\') {
                        buf++;
                        if (buf < end)
                                *buf++ = c;
                        continue;
                }
                if (c == '\n') {
                        *buf++ = '\\';
                        if (buf < end)
                                *buf++ = 'n';
                        continue;
                }
                buf++;
        }
        m->count = buf - m->buf;
        seq_putc(m, '\n');
}

/*
 * The task state array is a strange "bitmap" of
 * reasons to sleep. Thus "running" is zero, and
 * you can test for combinations of others with
 * simple bit tests.
 */
static const char * const task_state_array[] = {
        "R (running)",          /*   0 */
        "S (sleeping)",         /*   1 */
        "D (disk sleep)",       /*   2 */
        "T (stopped)",          /*   4 */
        "t (tracing stop)",     /*   8 */
        "Z (zombie)",           /*  16 */
        "X (dead)",             /*  32 */
        "x (dead)",             /*  64 */
        "K (wakekill)",         /* 128 */
        "W (waking)",           /* 256 */
};

static inline const char *get_task_state(struct task_struct *tsk)
{
        unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
        const char * const *p = &task_state_array[0];

        BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));

        while (state) {
                p++;
                state >>= 1;
        }
        return *p;
}

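/*
 * Emit the State, Tgid, Pid, PPid, TracerPid, Uid, Gid, FDSize and
 * Groups fields of /proc/<pid>/status.
 */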
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
                              struct pid *pid, struct task_struct *p)
{
        struct user_namespace *user_ns = seq_user_ns(m);
        struct group_info *group_info;
        int g;
        struct fdtable *fdt = NULL;
        const struct cred *cred;
        pid_t ppid, tpid;

        rcu_read_lock();
        ppid = pid_alive(p) ?
                task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
        tpid = 0;
        if (pid_alive(p)) {
                struct task_struct *tracer = ptrace_parent(p);
                if (tracer)
                        tpid = task_pid_nr_ns(tracer, ns);
        }
        cred = get_task_cred(p);
        seq_printf(m,
                "State:\t%s\n"
                "Tgid:\t%d\n"
                "Pid:\t%d\n"
                "PPid:\t%d\n"
                "TracerPid:\t%d\n"
                "Uid:\t%d\t%d\t%d\t%d\n"
                "Gid:\t%d\t%d\t%d\t%d\n",
                get_task_state(p),
                task_tgid_nr_ns(p, ns),
                pid_nr_ns(pid, ns),
                ppid, tpid,
                from_kuid_munged(user_ns, cred->uid),
                from_kuid_munged(user_ns, cred->euid),
                from_kuid_munged(user_ns, cred->suid),
                from_kuid_munged(user_ns, cred->fsuid),
                from_kgid_munged(user_ns, cred->gid),
                from_kgid_munged(user_ns, cred->egid),
                from_kgid_munged(user_ns, cred->sgid),
                from_kgid_munged(user_ns, cred->fsgid));

        task_lock(p);
        if (p->files)
                fdt = files_fdtable(p->files);
        seq_printf(m,
                "FDSize:\t%d\n"
                "Groups:\t",
                fdt ? fdt->max_fds : 0);
        rcu_read_unlock();

        group_info = cred->group_info;
        task_unlock(p);

        for (g = 0; g < group_info->ngroups; g++)
                seq_printf(m, "%d ",
                           from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
        put_cred(cred);

        seq_putc(m, '\n');
}

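/*
 * Render a signal set as a hex string: one digit per four signals,
 * highest-numbered signals printed first.
 */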
void render_sigset_t(struct seq_file *m, const char *header,
                     sigset_t *set)
{
        int i;

        seq_puts(m, header);

        i = _NSIG;
        do {
                int x = 0;

                i -= 4;
                if (sigismember(set, i+1)) x |= 1;
                if (sigismember(set, i+2)) x |= 2;
                if (sigismember(set, i+3)) x |= 4;
                if (sigismember(set, i+4)) x |= 8;
                seq_printf(m, "%x", x);
        } while (i >= 4);

        seq_putc(m, '\n');
}

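/*
 * Collect which signals the task ignores (SIG_IGN) and which it catches
 * with an installed handler (anything other than SIG_IGN or SIG_DFL).
 */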
static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
                                    sigset_t *catch)
{
        struct k_sigaction *k;
        int i;

        k = p->sighand->action;
        for (i = 1; i <= _NSIG; ++i, ++k) {
                if (k->sa.sa_handler == SIG_IGN)
                        sigaddset(ign, i);
                else if (k->sa.sa_handler != SIG_DFL)
                        sigaddset(catch, i);
        }
}

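/*
 * Emit the Threads, SigQ and signal-set lines of /proc/<pid>/status,
 * snapshotting the signal state under the sighand lock.
 */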
static inline void task_sig(struct seq_file *m, struct task_struct *p)
{
        unsigned long flags;
        sigset_t pending, shpending, blocked, ignored, caught;
        int num_threads = 0;
        unsigned long qsize = 0;
        unsigned long qlim = 0;

        sigemptyset(&pending);
        sigemptyset(&shpending);
        sigemptyset(&blocked);
        sigemptyset(&ignored);
        sigemptyset(&caught);

        if (lock_task_sighand(p, &flags)) {
                pending = p->pending.signal;
                shpending = p->signal->shared_pending.signal;
                blocked = p->blocked;
                collect_sigign_sigcatch(p, &ignored, &caught);
                num_threads = get_nr_threads(p);
                rcu_read_lock();  /* FIXME: is this correct? */
                qsize = atomic_read(&__task_cred(p)->user->sigpending);
                rcu_read_unlock();
                qlim = task_rlimit(p, RLIMIT_SIGPENDING);
                unlock_task_sighand(p, &flags);
        }

        seq_printf(m, "Threads:\t%d\n", num_threads);
        seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);

        /* render them all */
        render_sigset_t(m, "SigPnd:\t", &pending);
        render_sigset_t(m, "ShdPnd:\t", &shpending);
        render_sigset_t(m, "SigBlk:\t", &blocked);
        render_sigset_t(m, "SigIgn:\t", &ignored);
        render_sigset_t(m, "SigCgt:\t", &caught);
}

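/* Render a capability set as hex, most significant 32-bit word first. */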
static void render_cap_t(struct seq_file *m, const char *header,
                         kernel_cap_t *a)
{
        unsigned __capi;

        seq_puts(m, header);
        CAP_FOR_EACH_U32(__capi) {
                seq_printf(m, "%08x",
                           a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
        }
        seq_putc(m, '\n');
}

/* Remove non-existent capabilities */
#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
                                CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)

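/*
 * Emit the CapInh, CapPrm, CapEff and CapBnd lines of /proc/<pid>/status,
 * with bits above CAP_LAST_CAP masked off.
 */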
static inline void task_cap(struct seq_file *m, struct task_struct *p)
{
        const struct cred *cred;
        kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset;

        rcu_read_lock();
        cred = __task_cred(p);
        cap_inheritable = cred->cap_inheritable;
        cap_permitted   = cred->cap_permitted;
        cap_effective   = cred->cap_effective;
        cap_bset        = cred->cap_bset;
        rcu_read_unlock();

        NORM_CAPS(cap_inheritable);
        NORM_CAPS(cap_permitted);
        NORM_CAPS(cap_effective);
        NORM_CAPS(cap_bset);

        render_cap_t(m, "CapInh:\t", &cap_inheritable);
        render_cap_t(m, "CapPrm:\t", &cap_permitted);
        render_cap_t(m, "CapEff:\t", &cap_effective);
        render_cap_t(m, "CapBnd:\t", &cap_bset);
}

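/* Report the task's seccomp mode when CONFIG_SECCOMP is enabled. */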
static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
        seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
#endif
}

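/* Emit the voluntary and involuntary context-switch counters. */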
static inline void task_context_switch_counts(struct seq_file *m,
                                              struct task_struct *p)
{
        seq_printf(m, "voluntary_ctxt_switches:\t%lu\n"
                        "nonvoluntary_ctxt_switches:\t%lu\n",
                        p->nvcsw,
                        p->nivcsw);
}

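/* Emit the task's CPU affinity, both as a bitmask and as a CPU list. */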
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
        seq_puts(m, "Cpus_allowed:\t");
        seq_cpumask(m, &task->cpus_allowed);
        seq_putc(m, '\n');
        seq_puts(m, "Cpus_allowed_list:\t");
        seq_cpumask_list(m, &task->cpus_allowed);
        seq_putc(m, '\n');
}

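/* Top-level show routine for /proc/<pid>/status. */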
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                    struct pid *pid, struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);

        task_name(m, task);
        task_state(m, ns, pid, task);

        if (mm) {
                task_mem(m, mm);
                mmput(mm);
        }
        task_sig(m, task);
        task_cap(m, task);
        task_seccomp(m, task);
        task_cpus_allowed(m, task);
        cpuset_task_status_allowed(m, task);
        task_context_switch_counts(m, task);
        return 0;
}

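/*
 * Common worker for /proc/<pid>/stat and /proc/<pid>/task/<tid>/stat.
 * With whole != 0 the fault and time counters are summed over the whole
 * thread group; otherwise only the given thread is reported.
 */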
static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
{
        unsigned long vsize, eip, esp, wchan = ~0UL;
        int priority, nice;
        int tty_pgrp = -1, tty_nr = 0;
        sigset_t sigign, sigcatch;
        char state;
        pid_t ppid = 0, pgid = -1, sid = -1;
        int num_threads = 0;
        int permitted;
        struct mm_struct *mm;
        unsigned long long start_time;
        unsigned long cmin_flt = 0, cmaj_flt = 0;
        unsigned long min_flt = 0, maj_flt = 0;
        cputime_t cutime, cstime, utime, stime;
        cputime_t cgtime, gtime;
        unsigned long rsslim = 0;
        char tcomm[sizeof(task->comm)];
        unsigned long flags;

        state = *get_task_state(task);
        vsize = eip = esp = 0;
        permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
        mm = get_task_mm(task);
        if (mm) {
                vsize = task_vsize(mm);
                if (permitted) {
                        eip = KSTK_EIP(task);
                        esp = KSTK_ESP(task);
                }
        }

        get_task_comm(tcomm, task);

        sigemptyset(&sigign);
        sigemptyset(&sigcatch);
        cutime = cstime = utime = stime = 0;
        cgtime = gtime = 0;

        if (lock_task_sighand(task, &flags)) {
                struct signal_struct *sig = task->signal;

                if (sig->tty) {
                        struct pid *pgrp = tty_get_pgrp(sig->tty);
                        tty_pgrp = pid_nr_ns(pgrp, ns);
                        put_pid(pgrp);
                        tty_nr = new_encode_dev(tty_devnum(sig->tty));
                }

                num_threads = get_nr_threads(task);
                collect_sigign_sigcatch(task, &sigign, &sigcatch);

                cmin_flt = sig->cmin_flt;
                cmaj_flt = sig->cmaj_flt;
                cutime = sig->cutime;
                cstime = sig->cstime;
                cgtime = sig->cgtime;
                rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);

                /* add up live thread stats at the group level */
                if (whole) {
                        struct task_struct *t = task;
                        do {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
                                gtime += task_gtime(t);
                                t = next_thread(t);
                        } while (t != task);

                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
                        thread_group_cputime_adjusted(task, &utime, &stime);
                        gtime += sig->gtime;
                }

                sid = task_session_nr_ns(task, ns);
                ppid = task_tgid_nr_ns(task->real_parent, ns);
                pgid = task_pgrp_nr_ns(task, ns);

                unlock_task_sighand(task, &flags);
        }

        if (permitted && (!whole || num_threads < 2))
                wchan = get_wchan(task);
        if (!whole) {
                min_flt = task->min_flt;
                maj_flt = task->maj_flt;
                task_cputime_adjusted(task, &utime, &stime);
                gtime = task_gtime(task);
        }

        /* scale priority and nice values from timeslices to -20..20 */
        /* to make it look like a "normal" Unix priority/nice value */
        priority = task_prio(task);
        nice = task_nice(task);

        /* Temporary variable needed for gcc-2.96 */
        /* convert timespec -> nsec */
        start_time =
                (unsigned long long)task->real_start_time.tv_sec * NSEC_PER_SEC
                        + task->real_start_time.tv_nsec;
        /* convert nsec -> ticks */
        start_time = nsec_to_clock_t(start_time);

        seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
        seq_put_decimal_ll(m, ' ', ppid);
        seq_put_decimal_ll(m, ' ', pgid);
        seq_put_decimal_ll(m, ' ', sid);
        seq_put_decimal_ll(m, ' ', tty_nr);
        seq_put_decimal_ll(m, ' ', tty_pgrp);
        seq_put_decimal_ull(m, ' ', task->flags);
        seq_put_decimal_ull(m, ' ', min_flt);
        seq_put_decimal_ull(m, ' ', cmin_flt);
        seq_put_decimal_ull(m, ' ', maj_flt);
        seq_put_decimal_ull(m, ' ', cmaj_flt);
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime));
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime));
        seq_put_decimal_ll(m, ' ', priority);
        seq_put_decimal_ll(m, ' ', nice);
        seq_put_decimal_ll(m, ' ', num_threads);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', start_time);
        seq_put_decimal_ull(m, ' ', vsize);
        seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
        seq_put_decimal_ull(m, ' ', rsslim);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
        seq_put_decimal_ull(m, ' ', esp);
        seq_put_decimal_ull(m, ' ', eip);
        /* The signal information here is obsolete.
         * It must be decimal for Linux 2.0 compatibility.
         * Use /proc/#/status for real-time signals.
         */
        seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', wchan);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ll(m, ' ', task->exit_signal);
        seq_put_decimal_ll(m, ' ', task_cpu(task));
        seq_put_decimal_ull(m, ' ', task->rt_priority);
        seq_put_decimal_ull(m, ' ', task->policy);
        seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));

        if (mm && permitted) {
                seq_put_decimal_ull(m, ' ', mm->start_data);
                seq_put_decimal_ull(m, ' ', mm->end_data);
                seq_put_decimal_ull(m, ' ', mm->start_brk);
                seq_put_decimal_ull(m, ' ', mm->arg_start);
                seq_put_decimal_ull(m, ' ', mm->arg_end);
                seq_put_decimal_ull(m, ' ', mm->env_start);
                seq_put_decimal_ull(m, ' ', mm->env_end);
        } else
                seq_printf(m, " 0 0 0 0 0 0 0");

        if (permitted)
                seq_put_decimal_ll(m, ' ', task->exit_code);
        else
                seq_put_decimal_ll(m, ' ', 0);

        seq_putc(m, '\n');
        if (mm)
                mmput(mm);
        return 0;
}

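/* /proc/<pid>/task/<tid>/stat: per-thread counters only. */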
int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
                  struct pid *pid, struct task_struct *task)
{
        return do_task_stat(m, ns, pid, task, 0);
}

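/* /proc/<pid>/stat: counters summed over the whole thread group. */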
int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
                   struct pid *pid, struct task_struct *task)
{
        return do_task_stat(m, ns, pid, task, 1);
}

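/* Show routine for /proc/<pid>/statm: memory usage counters in pages. */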
int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
                   struct pid *pid, struct task_struct *task)
{
        unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
        struct mm_struct *mm = get_task_mm(task);

        if (mm) {
                size = task_statm(mm, &shared, &text, &data, &resident);
                mmput(mm);
        }
        /*
         * For quick read, open code by putting numbers directly
         * expected format is
         * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
         *               size, resident, shared, text, data);
         */
        seq_put_decimal_ull(m, 0, size);
        seq_put_decimal_ull(m, ' ', resident);
        seq_put_decimal_ull(m, ' ', shared);
        seq_put_decimal_ull(m, ' ', text);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', data);
        seq_put_decimal_ull(m, ' ', 0);
        seq_putc(m, '\n');

        return 0;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static struct pid *
get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
{
        struct task_struct *start, *task;
        struct pid *pid = NULL;

        read_lock(&tasklist_lock);

        start = pid_task(proc_pid(inode), PIDTYPE_PID);
        if (!start)
                goto out;

        /*
         * Let's try to continue searching first, this gives
         * us significant speedup on children-rich processes.
         */
        if (pid_prev) {
                task = pid_task(pid_prev, PIDTYPE_PID);
                if (task && task->real_parent == start &&
                    !(list_empty(&task->sibling))) {
                        if (list_is_last(&task->sibling, &start->children))
                                goto out;
                        task = list_first_entry(&task->sibling,
                                                struct task_struct, sibling);
                        pid = get_pid(task_pid(task));
                        goto out;
                }
        }

        /*
         * Slow search case.
         *
         * We might miss some children here if children
         * are exited while we were not holding the lock,
         * but it was never promised to be accurate that
         * much.
         *
         * "Just suppose that the parent sleeps, but N children
         *  exit after we printed their tids. Now the slow paths
         *  skips N extra children, we miss N tasks." (c)
         *
         * So one needs to stop or freeze the leader and all
         * its children to get a precise result.
         */
        list_for_each_entry(task, &start->children, sibling) {
                if (pos-- == 0) {
                        pid = get_pid(task_pid(task));
                        break;
                }
        }

out:
        read_unlock(&tasklist_lock);
        return pid;
}

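/* seq_file callbacks backing /proc/<pid>/task/<tid>/children. */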
static int children_seq_show(struct seq_file *seq, void *v)
{
        struct inode *inode = seq->private;
        pid_t pid;

        pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
        return seq_printf(seq, "%d ", pid);
}

static void *children_seq_start(struct seq_file *seq, loff_t *pos)
{
        return get_children_pid(seq->private, NULL, *pos);
}

static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct pid *pid;

        pid = get_children_pid(seq->private, v, *pos + 1);
        put_pid(v);

        ++*pos;
        return pid;
}

static void children_seq_stop(struct seq_file *seq, void *v)
{
        put_pid(v);
}

static const struct seq_operations children_seq_ops = {
        .start = children_seq_start,
        .next  = children_seq_next,
        .stop  = children_seq_stop,
        .show  = children_seq_show,
};

static int children_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &children_seq_ops);
        if (ret)
                return ret;

        m = file->private_data;
        m->private = inode;

        return ret;
}

int children_seq_release(struct inode *inode, struct file *file)
{
        seq_release(inode, file);
        return 0;
}

const struct file_operations proc_tid_children_operations = {
        .open    = children_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = children_seq_release,
};
#endif /* CONFIG_CHECKPOINT_RESTORE */