uid_sys_stats: reduce update_io_stats overhead
authorJin Qian <jinqian@google.com>
Fri, 14 Apr 2017 00:07:58 +0000 (17:07 -0700)
committerDanny Wood <danwood76@gmail.com>
Sun, 31 Mar 2019 08:48:11 +0000 (09:48 +0100)
Replaced read_lock with rcu_read_lock to reduce the time during which
preemption is disabled.

Added a function to update io stats for a specific uid, and moved the
hash table lookup and user_namespace access out of the loops.

Bug: 37319300
Change-Id: I2b81b5cd3b6399b40d08c3c14b42cad044556970
Signed-off-by: Jin Qian <jinqian@google.com>
Signed-off-by: Francisco Franco <franciscofranco.1990@gmail.com>
drivers/misc/uid_sys_stats.c

index e16adc556a653fdb220cdb33a7b1630cac9f26b3..a22b3bc4f43fb91448c610aaa1217ac6a5feb783 100644 (file)
@@ -245,12 +245,14 @@ static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
        io_last->fsync -= task->ioac.syscfs;
 }
 
-static void update_io_stats_locked(void)
+static void update_io_stats_all_locked(void)
 {
        struct uid_entry *uid_entry;
        struct task_struct *task, *temp;
        struct io_stats *io_bucket, *io_curr, *io_last;
+       struct user_namespace *user_ns = current_user_ns();
        unsigned long bkt;
+       uid_t uid;
 
        BUG_ON(!rt_mutex_is_locked(&uid_lock));
 
@@ -258,15 +260,15 @@ static void update_io_stats_locked(void)
                memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
                        sizeof(struct io_stats));
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        do_each_thread(temp, task) {
-               uid_entry = find_or_register_uid(from_kuid_munged(
-                       current_user_ns(), task_uid(task)));
+               uid = from_kuid_munged(user_ns, task_uid(task));
+               uid_entry = find_or_register_uid(uid);
                if (!uid_entry)
                        continue;
                add_uid_io_curr_stats(uid_entry, task);
        } while_each_thread(temp, task);
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        hash_for_each(hash_table, bkt, uid_entry, hash) {
                io_bucket = &uid_entry->io[uid_entry->state];
@@ -289,6 +291,49 @@ static void update_io_stats_locked(void)
        }
 }
 
+static void update_io_stats_uid_locked(uid_t target_uid)
+{
+       struct uid_entry *uid_entry;
+       struct task_struct *task, *temp;
+       struct io_stats *io_bucket, *io_curr, *io_last;
+       struct user_namespace *user_ns = current_user_ns();
+
+       BUG_ON(!rt_mutex_is_locked(&uid_lock));
+
+       uid_entry = find_or_register_uid(target_uid);
+       if (!uid_entry)
+               return;
+
+       memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+               sizeof(struct io_stats));
+
+       rcu_read_lock();
+       do_each_thread(temp, task) {
+               if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+                       continue;
+               add_uid_io_curr_stats(uid_entry, task);
+       } while_each_thread(temp, task);
+       rcu_read_unlock();
+
+       io_bucket = &uid_entry->io[uid_entry->state];
+       io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+       io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+
+       io_bucket->read_bytes +=
+               io_curr->read_bytes - io_last->read_bytes;
+       io_bucket->write_bytes +=
+               io_curr->write_bytes - io_last->write_bytes;
+       io_bucket->rchar += io_curr->rchar - io_last->rchar;
+       io_bucket->wchar += io_curr->wchar - io_last->wchar;
+       io_bucket->fsync += io_curr->fsync - io_last->fsync;
+
+       io_last->read_bytes = io_curr->read_bytes;
+       io_last->write_bytes = io_curr->write_bytes;
+       io_last->rchar = io_curr->rchar;
+       io_last->wchar = io_curr->wchar;
+       io_last->fsync = io_curr->fsync;
+}
+
 static int uid_io_show(struct seq_file *m, void *v)
 {
        struct uid_entry *uid_entry;
@@ -296,7 +341,7 @@ static int uid_io_show(struct seq_file *m, void *v)
 
        rt_mutex_lock(&uid_lock);
 
-       update_io_stats_locked();
+       update_io_stats_all_locked();
 
        hash_for_each(hash_table, bkt, uid_entry, hash) {
                seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -371,7 +416,7 @@ static ssize_t uid_procstat_write(struct file *file,
                return count;
        }
 
-       update_io_stats_locked();
+       update_io_stats_uid_locked(uid);
 
        uid_entry->state = state;
 
@@ -411,7 +456,7 @@ static int process_notifier(struct notifier_block *self,
        uid_entry->power += task->cpu_power;
        task->cpu_power = ULLONG_MAX;
 
-       update_io_stats_locked();
+       update_io_stats_uid_locked(uid);
        clean_uid_io_last_stats(uid_entry, task);
 
 exit: