/* drivers/misc/uid_sys_stats.c
 *
 * Copyright (C) 2014 - 2015 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/atomic.h>
#include <linux/cpufreq_times.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
33 #define UID_HASH_BITS 10
34 DECLARE_HASHTABLE(hash_table
, UID_HASH_BITS
);
36 static DEFINE_RT_MUTEX(uid_lock
);
37 static struct proc_dir_entry
*cpu_parent
;
38 static struct proc_dir_entry
*io_parent
;
39 static struct proc_dir_entry
*proc_parent
;
49 #define UID_STATE_FOREGROUND 0
50 #define UID_STATE_BACKGROUND 1
51 #define UID_STATE_BUCKET_SIZE 2
53 #define UID_STATE_TOTAL_CURR 2
54 #define UID_STATE_TOTAL_LAST 3
55 #define UID_STATE_DEAD_TASKS 4
56 #define UID_STATE_SIZE 5
58 #define MAX_TASK_COMM_LEN 256
61 char comm
[MAX_TASK_COMM_LEN
];
63 struct io_stats io
[UID_STATE_SIZE
];
64 struct hlist_node hash
;
74 struct io_stats io
[UID_STATE_SIZE
];
75 struct hlist_node hash
;
76 #ifdef CONFIG_UID_SYS_STATS_DEBUG
77 DECLARE_HASHTABLE(task_entries
, UID_HASH_BITS
);
81 static u64
compute_write_bytes(struct task_struct
*task
)
83 if (task
->ioac
.write_bytes
<= task
->ioac
.cancelled_write_bytes
)
86 return task
->ioac
.write_bytes
- task
->ioac
.cancelled_write_bytes
;
89 static void compute_io_bucket_stats(struct io_stats
*io_bucket
,
90 struct io_stats
*io_curr
,
91 struct io_stats
*io_last
,
92 struct io_stats
*io_dead
)
94 /* tasks could switch to another uid group, but its io_last in the
95 * previous uid group could still be positive.
96 * therefore before each update, do an overflow check first
100 delta
= io_curr
->read_bytes
+ io_dead
->read_bytes
-
102 io_bucket
->read_bytes
+= delta
> 0 ? delta
: 0;
103 delta
= io_curr
->write_bytes
+ io_dead
->write_bytes
-
104 io_last
->write_bytes
;
105 io_bucket
->write_bytes
+= delta
> 0 ? delta
: 0;
106 delta
= io_curr
->rchar
+ io_dead
->rchar
- io_last
->rchar
;
107 io_bucket
->rchar
+= delta
> 0 ? delta
: 0;
108 delta
= io_curr
->wchar
+ io_dead
->wchar
- io_last
->wchar
;
109 io_bucket
->wchar
+= delta
> 0 ? delta
: 0;
110 delta
= io_curr
->fsync
+ io_dead
->fsync
- io_last
->fsync
;
111 io_bucket
->fsync
+= delta
> 0 ? delta
: 0;
113 io_last
->read_bytes
= io_curr
->read_bytes
;
114 io_last
->write_bytes
= io_curr
->write_bytes
;
115 io_last
->rchar
= io_curr
->rchar
;
116 io_last
->wchar
= io_curr
->wchar
;
117 io_last
->fsync
= io_curr
->fsync
;
119 memset(io_dead
, 0, sizeof(struct io_stats
));
122 #ifdef CONFIG_UID_SYS_STATS_DEBUG
123 static void get_full_task_comm(struct task_entry
*task_entry
,
124 struct task_struct
*task
)
126 int i
= 0, offset
= 0, len
= 0;
127 /* save one byte for terminating null character */
128 int unused_len
= MAX_TASK_COMM_LEN
- TASK_COMM_LEN
- 1;
129 char buf
[unused_len
];
130 struct mm_struct
*mm
= task
->mm
;
132 /* fill the first TASK_COMM_LEN bytes with thread name */
133 __get_task_comm(task_entry
->comm
, TASK_COMM_LEN
, task
);
134 i
= strlen(task_entry
->comm
);
135 while (i
< TASK_COMM_LEN
)
136 task_entry
->comm
[i
++] = ' ';
138 /* next the executable file name */
140 down_read(&mm
->mmap_sem
);
142 char *pathname
= d_path(&mm
->exe_file
->f_path
, buf
,
145 if (!IS_ERR(pathname
)) {
146 len
= strlcpy(task_entry
->comm
+ i
, pathname
,
149 task_entry
->comm
[i
++] = ' ';
153 up_read(&mm
->mmap_sem
);
157 /* fill the rest with command line argument
158 * replace each null or new line character
159 * between args in argv with whitespace */
160 len
= get_cmdline(task
, buf
, unused_len
);
161 while (offset
< len
) {
162 if (buf
[offset
] != '\0' && buf
[offset
] != '\n')
163 task_entry
->comm
[i
++] = buf
[offset
];
165 task_entry
->comm
[i
++] = ' ';
169 /* get rid of trailing whitespaces in case when arg is memset to
170 * zero before being reset in userspace
172 while (task_entry
->comm
[i
-1] == ' ')
174 task_entry
->comm
[i
] = '\0';
177 static struct task_entry
*find_task_entry(struct uid_entry
*uid_entry
,
178 struct task_struct
*task
)
180 struct task_entry
*task_entry
;
182 hash_for_each_possible(uid_entry
->task_entries
, task_entry
, hash
,
184 if (task
->pid
== task_entry
->pid
) {
185 /* if thread name changed, update the entire command */
186 int len
= strnchr(task_entry
->comm
, ' ', TASK_COMM_LEN
)
189 if (strncmp(task_entry
->comm
, task
->comm
, len
))
190 get_full_task_comm(task_entry
, task
);
197 static struct task_entry
*find_or_register_task(struct uid_entry
*uid_entry
,
198 struct task_struct
*task
)
200 struct task_entry
*task_entry
;
201 pid_t pid
= task
->pid
;
203 task_entry
= find_task_entry(uid_entry
, task
);
207 task_entry
= kzalloc(sizeof(struct task_entry
), GFP_ATOMIC
);
211 get_full_task_comm(task_entry
, task
);
213 task_entry
->pid
= pid
;
214 hash_add(uid_entry
->task_entries
, &task_entry
->hash
, (unsigned int)pid
);
219 static void remove_uid_tasks(struct uid_entry
*uid_entry
)
221 struct task_entry
*task_entry
;
222 unsigned long bkt_task
;
223 struct hlist_node
*tmp_task
;
225 hash_for_each_safe(uid_entry
->task_entries
, bkt_task
,
226 tmp_task
, task_entry
, hash
) {
227 hash_del(&task_entry
->hash
);
232 static void set_io_uid_tasks_zero(struct uid_entry
*uid_entry
)
234 struct task_entry
*task_entry
;
235 unsigned long bkt_task
;
237 hash_for_each(uid_entry
->task_entries
, bkt_task
, task_entry
, hash
) {
238 memset(&task_entry
->io
[UID_STATE_TOTAL_CURR
], 0,
239 sizeof(struct io_stats
));
243 static void add_uid_tasks_io_stats(struct uid_entry
*uid_entry
,
244 struct task_struct
*task
, int slot
)
246 struct task_entry
*task_entry
= find_or_register_task(uid_entry
, task
);
247 struct io_stats
*task_io_slot
= &task_entry
->io
[slot
];
249 task_io_slot
->read_bytes
+= task
->ioac
.read_bytes
;
250 task_io_slot
->write_bytes
+= compute_write_bytes(task
);
251 task_io_slot
->rchar
+= task
->ioac
.rchar
;
252 task_io_slot
->wchar
+= task
->ioac
.wchar
;
253 task_io_slot
->fsync
+= task
->ioac
.syscfs
;
256 static void compute_io_uid_tasks(struct uid_entry
*uid_entry
)
258 struct task_entry
*task_entry
;
259 unsigned long bkt_task
;
261 hash_for_each(uid_entry
->task_entries
, bkt_task
, task_entry
, hash
) {
262 compute_io_bucket_stats(&task_entry
->io
[uid_entry
->state
],
263 &task_entry
->io
[UID_STATE_TOTAL_CURR
],
264 &task_entry
->io
[UID_STATE_TOTAL_LAST
],
265 &task_entry
->io
[UID_STATE_DEAD_TASKS
]);
269 static void show_io_uid_tasks(struct seq_file
*m
, struct uid_entry
*uid_entry
)
271 struct task_entry
*task_entry
;
272 unsigned long bkt_task
;
274 hash_for_each(uid_entry
->task_entries
, bkt_task
, task_entry
, hash
) {
275 /* Separated by comma because space exists in task comm */
276 seq_printf(m
, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
278 (unsigned long)task_entry
->pid
,
279 task_entry
->io
[UID_STATE_FOREGROUND
].rchar
,
280 task_entry
->io
[UID_STATE_FOREGROUND
].wchar
,
281 task_entry
->io
[UID_STATE_FOREGROUND
].read_bytes
,
282 task_entry
->io
[UID_STATE_FOREGROUND
].write_bytes
,
283 task_entry
->io
[UID_STATE_BACKGROUND
].rchar
,
284 task_entry
->io
[UID_STATE_BACKGROUND
].wchar
,
285 task_entry
->io
[UID_STATE_BACKGROUND
].read_bytes
,
286 task_entry
->io
[UID_STATE_BACKGROUND
].write_bytes
,
287 task_entry
->io
[UID_STATE_FOREGROUND
].fsync
,
288 task_entry
->io
[UID_STATE_BACKGROUND
].fsync
);
292 static void remove_uid_tasks(struct uid_entry
*uid_entry
) {};
293 static void set_io_uid_tasks_zero(struct uid_entry
*uid_entry
) {};
294 static void add_uid_tasks_io_stats(struct uid_entry
*uid_entry
,
295 struct task_struct
*task
, int slot
) {};
296 static void compute_io_uid_tasks(struct uid_entry
*uid_entry
) {};
297 static void show_io_uid_tasks(struct seq_file
*m
,
298 struct uid_entry
*uid_entry
) {}
301 static struct uid_entry
*find_uid_entry(uid_t uid
)
303 struct uid_entry
*uid_entry
;
304 hash_for_each_possible(hash_table
, uid_entry
, hash
, uid
) {
305 if (uid_entry
->uid
== uid
)
311 static struct uid_entry
*find_or_register_uid(uid_t uid
)
313 struct uid_entry
*uid_entry
;
315 uid_entry
= find_uid_entry(uid
);
319 uid_entry
= kzalloc(sizeof(struct uid_entry
), GFP_ATOMIC
);
323 uid_entry
->uid
= uid
;
324 #ifdef CONFIG_UID_SYS_STATS_DEBUG
325 hash_init(uid_entry
->task_entries
);
327 hash_add(hash_table
, &uid_entry
->hash
, uid
);
332 static int uid_cputime_show(struct seq_file
*m
, void *v
)
334 struct uid_entry
*uid_entry
= NULL
;
335 struct task_struct
*task
, *temp
;
336 struct user_namespace
*user_ns
= current_user_ns();
342 rt_mutex_lock(&uid_lock
);
344 hash_for_each(hash_table
, bkt
, uid_entry
, hash
) {
345 uid_entry
->active_stime
= 0;
346 uid_entry
->active_utime
= 0;
350 do_each_thread(temp
, task
) {
351 uid
= from_kuid_munged(user_ns
, task_uid(task
));
352 if (!uid_entry
|| uid_entry
->uid
!= uid
)
353 uid_entry
= find_or_register_uid(uid
);
356 rt_mutex_unlock(&uid_lock
);
357 pr_err("%s: failed to find the uid_entry for uid %d\n",
361 task_cputime_adjusted(task
, &utime
, &stime
);
362 uid_entry
->active_utime
+= utime
;
363 uid_entry
->active_stime
+= stime
;
364 } while_each_thread(temp
, task
);
367 hash_for_each(hash_table
, bkt
, uid_entry
, hash
) {
368 u64 total_utime
= uid_entry
->utime
+
369 uid_entry
->active_utime
;
370 u64 total_stime
= uid_entry
->stime
+
371 uid_entry
->active_stime
;
372 seq_printf(m
, "%d: %llu %llu\n", uid_entry
->uid
,
373 ktime_to_ms(total_utime
), ktime_to_ms(total_stime
));
376 rt_mutex_unlock(&uid_lock
);
380 static int uid_cputime_open(struct inode
*inode
, struct file
*file
)
382 return single_open(file
, uid_cputime_show
, PDE_DATA(inode
));
385 static const struct file_operations uid_cputime_fops
= {
386 .open
= uid_cputime_open
,
389 .release
= single_release
,
392 static int uid_remove_open(struct inode
*inode
, struct file
*file
)
394 return single_open(file
, NULL
, NULL
);
397 static ssize_t
uid_remove_write(struct file
*file
,
398 const char __user
*buffer
, size_t count
, loff_t
*ppos
)
400 struct uid_entry
*uid_entry
;
401 struct hlist_node
*tmp
;
403 char *start_uid
, *end_uid
= NULL
;
404 long int uid_start
= 0, uid_end
= 0;
406 if (count
>= sizeof(uids
))
407 count
= sizeof(uids
) - 1;
409 if (copy_from_user(uids
, buffer
, count
))
414 start_uid
= strsep(&end_uid
, "-");
416 if (!start_uid
|| !end_uid
)
419 if (kstrtol(start_uid
, 10, &uid_start
) != 0 ||
420 kstrtol(end_uid
, 10, &uid_end
) != 0) {
424 /* Also remove uids from /proc/uid_time_in_state */
425 cpufreq_task_times_remove_uids(uid_start
, uid_end
);
427 rt_mutex_lock(&uid_lock
);
429 for (; uid_start
<= uid_end
; uid_start
++) {
430 hash_for_each_possible_safe(hash_table
, uid_entry
, tmp
,
431 hash
, (uid_t
)uid_start
) {
432 if (uid_start
== uid_entry
->uid
) {
433 remove_uid_tasks(uid_entry
);
434 hash_del(&uid_entry
->hash
);
440 rt_mutex_unlock(&uid_lock
);
444 static const struct file_operations uid_remove_fops
= {
445 .open
= uid_remove_open
,
446 .release
= single_release
,
447 .write
= uid_remove_write
,
451 static void add_uid_io_stats(struct uid_entry
*uid_entry
,
452 struct task_struct
*task
, int slot
)
454 struct io_stats
*io_slot
= &uid_entry
->io
[slot
];
456 io_slot
->read_bytes
+= task
->ioac
.read_bytes
;
457 io_slot
->write_bytes
+= compute_write_bytes(task
);
458 io_slot
->rchar
+= task
->ioac
.rchar
;
459 io_slot
->wchar
+= task
->ioac
.wchar
;
460 io_slot
->fsync
+= task
->ioac
.syscfs
;
462 add_uid_tasks_io_stats(uid_entry
, task
, slot
);
465 static void update_io_stats_all_locked(void)
467 struct uid_entry
*uid_entry
= NULL
;
468 struct task_struct
*task
, *temp
;
469 struct user_namespace
*user_ns
= current_user_ns();
473 hash_for_each(hash_table
, bkt
, uid_entry
, hash
) {
474 memset(&uid_entry
->io
[UID_STATE_TOTAL_CURR
], 0,
475 sizeof(struct io_stats
));
476 set_io_uid_tasks_zero(uid_entry
);
480 do_each_thread(temp
, task
) {
481 uid
= from_kuid_munged(user_ns
, task_uid(task
));
482 if (!uid_entry
|| uid_entry
->uid
!= uid
)
483 uid_entry
= find_or_register_uid(uid
);
486 add_uid_io_stats(uid_entry
, task
, UID_STATE_TOTAL_CURR
);
487 } while_each_thread(temp
, task
);
490 hash_for_each(hash_table
, bkt
, uid_entry
, hash
) {
491 compute_io_bucket_stats(&uid_entry
->io
[uid_entry
->state
],
492 &uid_entry
->io
[UID_STATE_TOTAL_CURR
],
493 &uid_entry
->io
[UID_STATE_TOTAL_LAST
],
494 &uid_entry
->io
[UID_STATE_DEAD_TASKS
]);
495 compute_io_uid_tasks(uid_entry
);
499 static void update_io_stats_uid_locked(struct uid_entry
*uid_entry
)
501 struct task_struct
*task
, *temp
;
502 struct user_namespace
*user_ns
= current_user_ns();
504 memset(&uid_entry
->io
[UID_STATE_TOTAL_CURR
], 0,
505 sizeof(struct io_stats
));
506 set_io_uid_tasks_zero(uid_entry
);
509 do_each_thread(temp
, task
) {
510 if (from_kuid_munged(user_ns
, task_uid(task
)) != uid_entry
->uid
)
512 add_uid_io_stats(uid_entry
, task
, UID_STATE_TOTAL_CURR
);
513 } while_each_thread(temp
, task
);
516 compute_io_bucket_stats(&uid_entry
->io
[uid_entry
->state
],
517 &uid_entry
->io
[UID_STATE_TOTAL_CURR
],
518 &uid_entry
->io
[UID_STATE_TOTAL_LAST
],
519 &uid_entry
->io
[UID_STATE_DEAD_TASKS
]);
520 compute_io_uid_tasks(uid_entry
);
524 static int uid_io_show(struct seq_file
*m
, void *v
)
526 struct uid_entry
*uid_entry
;
529 rt_mutex_lock(&uid_lock
);
531 update_io_stats_all_locked();
533 hash_for_each(hash_table
, bkt
, uid_entry
, hash
) {
534 seq_printf(m
, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
536 uid_entry
->io
[UID_STATE_FOREGROUND
].rchar
,
537 uid_entry
->io
[UID_STATE_FOREGROUND
].wchar
,
538 uid_entry
->io
[UID_STATE_FOREGROUND
].read_bytes
,
539 uid_entry
->io
[UID_STATE_FOREGROUND
].write_bytes
,
540 uid_entry
->io
[UID_STATE_BACKGROUND
].rchar
,
541 uid_entry
->io
[UID_STATE_BACKGROUND
].wchar
,
542 uid_entry
->io
[UID_STATE_BACKGROUND
].read_bytes
,
543 uid_entry
->io
[UID_STATE_BACKGROUND
].write_bytes
,
544 uid_entry
->io
[UID_STATE_FOREGROUND
].fsync
,
545 uid_entry
->io
[UID_STATE_BACKGROUND
].fsync
);
547 show_io_uid_tasks(m
, uid_entry
);
550 rt_mutex_unlock(&uid_lock
);
554 static int uid_io_open(struct inode
*inode
, struct file
*file
)
556 return single_open(file
, uid_io_show
, PDE_DATA(inode
));
559 static const struct file_operations uid_io_fops
= {
563 .release
= single_release
,
566 static int uid_procstat_open(struct inode
*inode
, struct file
*file
)
568 return single_open(file
, NULL
, NULL
);
571 static ssize_t
uid_procstat_write(struct file
*file
,
572 const char __user
*buffer
, size_t count
, loff_t
*ppos
)
574 struct uid_entry
*uid_entry
;
579 if (count
>= sizeof(input
))
582 if (copy_from_user(input
, buffer
, count
))
587 argc
= sscanf(input
, "%u %d", &uid
, &state
);
591 if (state
!= UID_STATE_BACKGROUND
&& state
!= UID_STATE_FOREGROUND
)
594 rt_mutex_lock(&uid_lock
);
596 uid_entry
= find_or_register_uid(uid
);
598 rt_mutex_unlock(&uid_lock
);
602 if (uid_entry
->state
== state
) {
603 rt_mutex_unlock(&uid_lock
);
607 update_io_stats_uid_locked(uid_entry
);
609 uid_entry
->state
= state
;
611 rt_mutex_unlock(&uid_lock
);
616 static const struct file_operations uid_procstat_fops
= {
617 .open
= uid_procstat_open
,
618 .release
= single_release
,
619 .write
= uid_procstat_write
,
622 static int process_notifier(struct notifier_block
*self
,
623 unsigned long cmd
, void *v
)
625 struct task_struct
*task
= v
;
626 struct uid_entry
*uid_entry
;
633 rt_mutex_lock(&uid_lock
);
634 uid
= from_kuid_munged(current_user_ns(), task_uid(task
));
635 uid_entry
= find_or_register_uid(uid
);
637 pr_err("%s: failed to find uid %d\n", __func__
, uid
);
641 task_cputime_adjusted(task
, &utime
, &stime
);
642 uid_entry
->utime
+= utime
;
643 uid_entry
->stime
+= stime
;
645 add_uid_io_stats(uid_entry
, task
, UID_STATE_DEAD_TASKS
);
648 rt_mutex_unlock(&uid_lock
);
652 static struct notifier_block process_notifier_block
= {
653 .notifier_call
= process_notifier
,
656 static int __init
proc_uid_sys_stats_init(void)
658 hash_init(hash_table
);
660 cpu_parent
= proc_mkdir("uid_cputime", NULL
);
662 pr_err("%s: failed to create uid_cputime proc entry\n",
667 proc_create_data("remove_uid_range", 0222, cpu_parent
,
668 &uid_remove_fops
, NULL
);
669 proc_create_data("show_uid_stat", 0444, cpu_parent
,
670 &uid_cputime_fops
, NULL
);
672 io_parent
= proc_mkdir("uid_io", NULL
);
674 pr_err("%s: failed to create uid_io proc entry\n",
679 proc_create_data("stats", 0444, io_parent
,
682 proc_parent
= proc_mkdir("uid_procstat", NULL
);
684 pr_err("%s: failed to create uid_procstat proc entry\n",
689 proc_create_data("set", 0222, proc_parent
,
690 &uid_procstat_fops
, NULL
);
692 profile_event_register(PROFILE_TASK_EXIT
, &process_notifier_block
);
697 remove_proc_subtree("uid_cputime", NULL
);
698 remove_proc_subtree("uid_io", NULL
);
699 remove_proc_subtree("uid_procstat", NULL
);
703 early_initcall(proc_uid_sys_stats_init
);