/* drivers/misc/uid_sys_stats.c
 *
 * Copyright (C) 2014 - 2015 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/cpufreq_times.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>


#define UID_HASH_BITS	10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);

static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;

struct io_stats {
	u64 read_bytes;
	u64 write_bytes;
	u64 rchar;
	u64 wchar;
	u64 fsync;
};

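/*
 * Slot layout for the io[UID_STATE_SIZE] arrays below: slots 0 and 1
 * accumulate I/O charged to the foreground and background states,
 * TOTAL_CURR holds the snapshot of all live tasks taken during the
 * current update, TOTAL_LAST the snapshot from the previous update,
 * and DEAD_TASKS collects I/O of tasks that exited in between.
 */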
#define UID_STATE_FOREGROUND	0
#define UID_STATE_BACKGROUND	1
#define UID_STATE_BUCKET_SIZE	2

#define UID_STATE_TOTAL_CURR	2
#define UID_STATE_TOTAL_LAST	3
#define UID_STATE_DEAD_TASKS	4
#define UID_STATE_SIZE		5

#define MAX_TASK_COMM_LEN	256

struct task_entry {
	char comm[MAX_TASK_COMM_LEN];
	pid_t pid;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
};

struct uid_entry {
	uid_t uid;
	u64 utime;
	u64 stime;
	u64 active_utime;
	u64 active_stime;
	int state;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
	DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
#endif
};

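/*
 * cancelled_write_bytes accounts for dirty pagecache that was thrown
 * away before reaching the disk (e.g. by truncate), so it can
 * legitimately exceed write_bytes; clamp the difference at zero rather
 * than letting the unsigned subtraction wrap around.
 */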
static u64 compute_write_bytes(struct task_struct *task)
{
	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
		return 0;

	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}

static void compute_io_bucket_stats(struct io_stats *io_bucket,
					struct io_stats *io_curr,
					struct io_stats *io_last,
					struct io_stats *io_dead)
{
	/* A task can switch to another uid group, but its io_last in the
	 * previous uid group could still be positive.
	 * Therefore, do an overflow check before each update.
	 */
	int64_t delta;

	delta = io_curr->read_bytes + io_dead->read_bytes -
		io_last->read_bytes;
	io_bucket->read_bytes += delta > 0 ? delta : 0;
	delta = io_curr->write_bytes + io_dead->write_bytes -
		io_last->write_bytes;
	io_bucket->write_bytes += delta > 0 ? delta : 0;
	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
	io_bucket->rchar += delta > 0 ? delta : 0;
	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
	io_bucket->wchar += delta > 0 ? delta : 0;
	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
	io_bucket->fsync += delta > 0 ? delta : 0;

	io_last->read_bytes = io_curr->read_bytes;
	io_last->write_bytes = io_curr->write_bytes;
	io_last->rchar = io_curr->rchar;
	io_last->wchar = io_curr->wchar;
	io_last->fsync = io_curr->fsync;

	memset(io_dead, 0, sizeof(struct io_stats));
}
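
/*
 * Worked example of the bucket arithmetic above (illustrative numbers
 * only): if live tasks of a uid have read 100 bytes so far (io_curr),
 * tasks that exited since the last update contributed 20 bytes
 * (io_dead), and the previous snapshot recorded 90 bytes (io_last),
 * then 100 + 20 - 90 = 30 new bytes are charged to the uid's current
 * state bucket, io_last becomes 100, and io_dead is reset to zero.
 */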

#ifdef CONFIG_UID_SYS_STATS_DEBUG
static void get_full_task_comm(struct task_entry *task_entry,
		struct task_struct *task)
{
	int i = 0, offset = 0, len = 0;
	/* save one byte for the terminating null character */
	int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
	char buf[MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1];
	struct mm_struct *mm = task->mm;

	/* fill the first TASK_COMM_LEN bytes with the thread name */
	__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
	i = strlen(task_entry->comm);
	while (i < TASK_COMM_LEN)
		task_entry->comm[i++] = ' ';

	/* next, the executable file name */
	if (mm) {
		down_read(&mm->mmap_sem);
		if (mm->exe_file) {
			char *pathname = d_path(&mm->exe_file->f_path, buf,
					unused_len);

			if (!IS_ERR(pathname)) {
				len = strlcpy(task_entry->comm + i, pathname,
						unused_len);
				i += len;
				task_entry->comm[i++] = ' ';
				unused_len--;
			}
		}
		up_read(&mm->mmap_sem);
	}
	unused_len -= len;

	/* fill the rest with the command line arguments, replacing each
	 * null or newline character between args in argv with whitespace
	 */
	len = get_cmdline(task, buf, unused_len);
	while (offset < len) {
		if (buf[offset] != '\0' && buf[offset] != '\n')
			task_entry->comm[i++] = buf[offset];
		else
			task_entry->comm[i++] = ' ';
		offset++;
	}

	/* strip trailing whitespace, which shows up when an arg was
	 * memset to zero before being reset in userspace
	 */
	while (task_entry->comm[i-1] == ' ')
		i--;
	task_entry->comm[i] = '\0';
}

static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
		struct task_struct *task)
{
	struct task_entry *task_entry;

	hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
			task->pid) {
		if (task->pid == task_entry->pid) {
			/* if thread name changed, update the entire command */
			int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
				- task_entry->comm;

			if (strncmp(task_entry->comm, task->comm, len))
				get_full_task_comm(task_entry, task);
			return task_entry;
		}
	}
	return NULL;
}

static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
		struct task_struct *task)
{
	struct task_entry *task_entry;
	pid_t pid = task->pid;

	task_entry = find_task_entry(uid_entry, task);
	if (task_entry)
		return task_entry;

	task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
	if (!task_entry)
		return NULL;

	get_full_task_comm(task_entry, task);

	task_entry->pid = pid;
	hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);

	return task_entry;
}

static void remove_uid_tasks(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;
	struct hlist_node *tmp_task;

	hash_for_each_safe(uid_entry->task_entries, bkt_task,
			tmp_task, task_entry, hash) {
		hash_del(&task_entry->hash);
		kfree(task_entry);
	}
}

static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));
	}
}

static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
		struct task_struct *task, int slot)
{
	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
	struct io_stats *task_io_slot;

	/* find_or_register_task() fails when kzalloc does; don't deref NULL */
	if (!task_entry)
		return;

	task_io_slot = &task_entry->io[slot];
	task_io_slot->read_bytes += task->ioac.read_bytes;
	task_io_slot->write_bytes += compute_write_bytes(task);
	task_io_slot->rchar += task->ioac.rchar;
	task_io_slot->wchar += task->ioac.wchar;
	task_io_slot->fsync += task->ioac.syscfs;
}

static void compute_io_uid_tasks(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		compute_io_bucket_stats(&task_entry->io[uid_entry->state],
					&task_entry->io[UID_STATE_TOTAL_CURR],
					&task_entry->io[UID_STATE_TOTAL_LAST],
					&task_entry->io[UID_STATE_DEAD_TASKS]);
	}
}

static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		/* fields are comma-separated because a task comm may contain spaces */
		seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
			task_entry->comm,
			(unsigned long)task_entry->pid,
			task_entry->io[UID_STATE_FOREGROUND].rchar,
			task_entry->io[UID_STATE_FOREGROUND].wchar,
			task_entry->io[UID_STATE_FOREGROUND].read_bytes,
			task_entry->io[UID_STATE_FOREGROUND].write_bytes,
			task_entry->io[UID_STATE_BACKGROUND].rchar,
			task_entry->io[UID_STATE_BACKGROUND].wchar,
			task_entry->io[UID_STATE_BACKGROUND].read_bytes,
			task_entry->io[UID_STATE_BACKGROUND].write_bytes,
			task_entry->io[UID_STATE_FOREGROUND].fsync,
			task_entry->io[UID_STATE_BACKGROUND].fsync);
	}
}
#else
static void remove_uid_tasks(struct uid_entry *uid_entry) {}
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
		struct task_struct *task, int slot) {}
static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}
static void show_io_uid_tasks(struct seq_file *m,
		struct uid_entry *uid_entry) {}
#endif

static struct uid_entry *find_uid_entry(uid_t uid)
{
	struct uid_entry *uid_entry;

	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
		if (uid_entry->uid == uid)
			return uid_entry;
	}
	return NULL;
}

static struct uid_entry *find_or_register_uid(uid_t uid)
{
	struct uid_entry *uid_entry;

	uid_entry = find_uid_entry(uid);
	if (uid_entry)
		return uid_entry;

	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
	if (!uid_entry)
		return NULL;

	uid_entry->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
	hash_init(uid_entry->task_entries);
#endif
	hash_add(hash_table, &uid_entry->hash, uid);

	return uid_entry;
}

static int uid_cputime_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry = NULL;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	u64 utime;
	u64 stime;
	unsigned long bkt;
	uid_t uid;

	rt_mutex_lock(&uid_lock);

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		uid_entry->active_stime = 0;
		uid_entry->active_utime = 0;
	}

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry) {
			rcu_read_unlock();
			rt_mutex_unlock(&uid_lock);
			pr_err("%s: failed to find the uid_entry for uid %d\n",
				__func__, uid);
			return -ENOMEM;
		}
		task_cputime_adjusted(task, &utime, &stime);
		uid_entry->active_utime += utime;
		uid_entry->active_stime += stime;
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		u64 total_utime = uid_entry->utime +
					uid_entry->active_utime;
		u64 total_stime = uid_entry->stime +
					uid_entry->active_stime;
		seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
			ktime_to_ms(total_utime), ktime_to_ms(total_stime));
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_cputime_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_cputime_show, PDE_DATA(inode));
}

static const struct file_operations uid_cputime_fops = {
	.open = uid_cputime_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
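
/*
 * Reading /proc/uid_cputime/show_uid_stat prints one line per uid:
 *   <uid>: <user time in ms> <system time in ms>
 * For example (illustrative values only):
 *   $ cat /proc/uid_cputime/show_uid_stat
 *   1000: 5210 1340
 */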

static int uid_remove_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

static ssize_t uid_remove_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	struct hlist_node *tmp;
	char uids[128];
	char *start_uid, *end_uid = NULL;
	long int uid_start = 0, uid_end = 0;

	if (count >= sizeof(uids))
		count = sizeof(uids) - 1;

	if (copy_from_user(uids, buffer, count))
		return -EFAULT;

	uids[count] = '\0';
	end_uid = uids;
	start_uid = strsep(&end_uid, "-");

	if (!start_uid || !end_uid)
		return -EINVAL;

	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
		kstrtol(end_uid, 10, &uid_end) != 0) {
		return -EINVAL;
	}

	/* also remove the uids from /proc/uid_time_in_state */
	cpufreq_task_times_remove_uids(uid_start, uid_end);

	rt_mutex_lock(&uid_lock);

	for (; uid_start <= uid_end; uid_start++) {
		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
						hash, (uid_t)uid_start) {
			if (uid_start == uid_entry->uid) {
				remove_uid_tasks(uid_entry);
				hash_del(&uid_entry->hash);
				kfree(uid_entry);
			}
		}
	}

	rt_mutex_unlock(&uid_lock);
	return count;
}

static const struct file_operations uid_remove_fops = {
	.open = uid_remove_open,
	.release = single_release,
	.write = uid_remove_write,
};
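
/*
 * Writing "<start>-<end>" to /proc/uid_cputime/remove_uid_range drops
 * the accounting for every uid in the inclusive range. For example,
 * with hypothetical uids:
 *   $ echo "10050-10060" > /proc/uid_cputime/remove_uid_range
 */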

static void add_uid_io_stats(struct uid_entry *uid_entry,
			struct task_struct *task, int slot)
{
	struct io_stats *io_slot = &uid_entry->io[slot];

	io_slot->read_bytes += task->ioac.read_bytes;
	io_slot->write_bytes += compute_write_bytes(task);
	io_slot->rchar += task->ioac.rchar;
	io_slot->wchar += task->ioac.wchar;
	io_slot->fsync += task->ioac.syscfs;

	add_uid_tasks_io_stats(uid_entry, task, slot);
}

static void update_io_stats_all_locked(void)
{
	struct uid_entry *uid_entry = NULL;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	unsigned long bkt;
	uid_t uid;

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));
		set_io_uid_tasks_zero(uid_entry);
	}

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
					&uid_entry->io[UID_STATE_TOTAL_CURR],
					&uid_entry->io[UID_STATE_TOTAL_LAST],
					&uid_entry->io[UID_STATE_DEAD_TASKS]);
		compute_io_uid_tasks(uid_entry);
	}
}

static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
{
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();

	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
		sizeof(struct io_stats));
	set_io_uid_tasks_zero(uid_entry);

	rcu_read_lock();
	do_each_thread(temp, task) {
		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
				&uid_entry->io[UID_STATE_TOTAL_CURR],
				&uid_entry->io[UID_STATE_TOTAL_LAST],
				&uid_entry->io[UID_STATE_DEAD_TASKS]);
	compute_io_uid_tasks(uid_entry);
}

static int uid_io_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	unsigned long bkt;

	rt_mutex_lock(&uid_lock);

	update_io_stats_all_locked();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			uid_entry->uid,
			uid_entry->io[UID_STATE_FOREGROUND].rchar,
			uid_entry->io[UID_STATE_FOREGROUND].wchar,
			uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
			uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
			uid_entry->io[UID_STATE_BACKGROUND].rchar,
			uid_entry->io[UID_STATE_BACKGROUND].wchar,
			uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
			uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
			uid_entry->io[UID_STATE_FOREGROUND].fsync,
			uid_entry->io[UID_STATE_BACKGROUND].fsync);

		show_io_uid_tasks(m, uid_entry);
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_io_show, PDE_DATA(inode));
}

static const struct file_operations uid_io_fops = {
	.open = uid_io_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
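
/*
 * Reading /proc/uid_io/stats prints one line per uid:
 *   <uid> <fg rchar> <fg wchar> <fg read_bytes> <fg write_bytes>
 *   <bg rchar> <bg wchar> <bg read_bytes> <bg write_bytes>
 *   <fg fsync> <bg fsync>
 * (all eleven fields on a single line; fg = foreground, bg =
 * background), followed by per-task "task,..." lines when
 * CONFIG_UID_SYS_STATS_DEBUG is enabled.
 */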

static int uid_procstat_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

static ssize_t uid_procstat_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	uid_t uid;
	int argc, state;
	char input[128];

	if (count >= sizeof(input))
		return -EINVAL;

	if (copy_from_user(input, buffer, count))
		return -EFAULT;

	input[count] = '\0';

	argc = sscanf(input, "%u %d", &uid, &state);
	if (argc != 2)
		return -EINVAL;

	if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
		return -EINVAL;

	rt_mutex_lock(&uid_lock);

	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		rt_mutex_unlock(&uid_lock);
		return -EINVAL;
	}

	if (uid_entry->state == state) {
		rt_mutex_unlock(&uid_lock);
		return count;
	}

	update_io_stats_uid_locked(uid_entry);

	uid_entry->state = state;

	rt_mutex_unlock(&uid_lock);

	return count;
}

static const struct file_operations uid_procstat_fops = {
	.open = uid_procstat_open,
	.release = single_release,
	.write = uid_procstat_write,
};
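
/*
 * Writing "<uid> <state>" to /proc/uid_procstat/set moves a uid between
 * the foreground (0) and background (1) buckets. For example, with a
 * hypothetical app uid:
 *   $ echo "10050 1" > /proc/uid_procstat/set
 * A state change first snapshots the uid's current I/O counters via
 * update_io_stats_uid_locked() so that subsequent I/O is charged to the
 * new bucket.
 */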

static int process_notifier(struct notifier_block *self,
			unsigned long cmd, void *v)
{
	struct task_struct *task = v;
	struct uid_entry *uid_entry;
	u64 utime, stime;
	uid_t uid;

	if (!task)
		return NOTIFY_OK;

	rt_mutex_lock(&uid_lock);
	uid = from_kuid_munged(current_user_ns(), task_uid(task));
	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		pr_err("%s: failed to find uid %d\n", __func__, uid);
		goto exit;
	}

	task_cputime_adjusted(task, &utime, &stime);
	uid_entry->utime += utime;
	uid_entry->stime += stime;

	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);

exit:
	rt_mutex_unlock(&uid_lock);
	return NOTIFY_OK;
}

static struct notifier_block process_notifier_block = {
	.notifier_call	= process_notifier,
};

static int __init proc_uid_sys_stats_init(void)
{
	hash_init(hash_table);

	cpu_parent = proc_mkdir("uid_cputime", NULL);
	if (!cpu_parent) {
		pr_err("%s: failed to create uid_cputime proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("remove_uid_range", 0222, cpu_parent,
		&uid_remove_fops, NULL);
	proc_create_data("show_uid_stat", 0444, cpu_parent,
		&uid_cputime_fops, NULL);

	io_parent = proc_mkdir("uid_io", NULL);
	if (!io_parent) {
		pr_err("%s: failed to create uid_io proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("stats", 0444, io_parent,
		&uid_io_fops, NULL);

	proc_parent = proc_mkdir("uid_procstat", NULL);
	if (!proc_parent) {
		pr_err("%s: failed to create uid_procstat proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("set", 0222, proc_parent,
		&uid_procstat_fops, NULL);

	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);

	return 0;

err:
	remove_proc_subtree("uid_cputime", NULL);
	remove_proc_subtree("uid_io", NULL);
	remove_proc_subtree("uid_procstat", NULL);
	return -ENOMEM;
}

early_initcall(proc_uid_sys_stats_init);