uid_sys_stats: change to use rt_mutex
author Wei Wang <wvw@google.com>
Mon, 13 Mar 2017 19:22:21 +0000 (12:22 -0700)
committer Danny Wood <danwood76@gmail.com>
Sun, 31 Mar 2019 08:48:11 +0000 (09:48 +0100)
We see this happen multiple times in systrace under heavy workload,
with AMS stuck waiting on uid_lock:

Running process:   Process 953
Running thread:    android.ui
State:             Uninterruptible Sleep
Start:             1,025.628 ms
Duration:          27,955.949 ms
On CPU:
Running instead:   system_server
Args:              {kernel callsite when blocked:: "uid_procstat_write+0xb8/0x144"}

Changing uid_lock to an rt_mutex can mitigate the priority inversion,
since rt_mutex provides priority inheritance.
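
For reference, the change below is a drop-in swap of the mutex API for the
rt_mutex API: with priority inheritance, a low-priority holder of uid_lock
is boosted while a higher-priority waiter (here the android.ui thread in
system_server) blocks on it. A minimal sketch of the pattern, using
placeholder names (example_lock, example_show) rather than the driver's own:

    #include <linux/rtmutex.h>

    /* was: static DEFINE_MUTEX(example_lock); */
    static DEFINE_RT_MUTEX(example_lock);

    static int example_show(void)
    {
            rt_mutex_lock(&example_lock);    /* was: mutex_lock() */
            /* ... read or update state shared under the lock ... */
            rt_mutex_unlock(&example_lock);  /* was: mutex_unlock() */
            return 0;
    }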

Bug: 34991231
Bug: 34193533
Test: on marlin
Change-Id: I28eb3971331cea60b1075740c792ab87d103262c
Signed-off-by: Wei Wang <wvw@google.com>
Signed-off-by: Francisco Franco <franciscofranco.1990@gmail.com>
drivers/misc/uid_sys_stats.c

index a6acc3e46461b1cdc296081701b18b5314ec726c..e16adc556a653fdb220cdb33a7b1630cac9f26b3 100644
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/rtmutex.h>
 
 #define UID_HASH_BITS  10
 DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
 
-static DEFINE_MUTEX(uid_lock);
+static DEFINE_RT_MUTEX(uid_lock);
 static struct proc_dir_entry *cpu_parent;
 static struct proc_dir_entry *io_parent;
 static struct proc_dir_entry *proc_parent;
@@ -100,7 +101,7 @@ static int uid_cputime_show(struct seq_file *m, void *v)
        cputime_t stime;
        unsigned long bkt;
 
-       mutex_lock(&uid_lock);
+       rt_mutex_lock(&uid_lock);
 
        hash_for_each(hash_table, bkt, uid_entry, hash) {
                uid_entry->active_stime = 0;
@@ -114,7 +115,7 @@ static int uid_cputime_show(struct seq_file *m, void *v)
                        current_user_ns(), task_uid(task)));
                if (!uid_entry) {
                        read_unlock(&tasklist_lock);
-                       mutex_unlock(&uid_lock);
+                       rt_mutex_unlock(&uid_lock);
                        pr_err("%s: failed to find the uid_entry for uid %d\n",
                                __func__, from_kuid_munged(current_user_ns(),
                                task_uid(task)));
@@ -143,7 +144,7 @@ static int uid_cputime_show(struct seq_file *m, void *v)
                        total_power);
        }
 
-       mutex_unlock(&uid_lock);
+       rt_mutex_unlock(&uid_lock);
        return 0;
 }
 
@@ -190,7 +191,7 @@ static ssize_t uid_remove_write(struct file *file,
                kstrtol(end_uid, 10, &uid_end) != 0) {
                return -EINVAL;
        }
-       mutex_lock(&uid_lock);
+       rt_mutex_lock(&uid_lock);
 
        for (; uid_start <= uid_end; uid_start++) {
                hash_for_each_possible_safe(hash_table, uid_entry, tmp,
@@ -202,7 +203,7 @@ static ssize_t uid_remove_write(struct file *file,
                }
        }
 
-       mutex_unlock(&uid_lock);
+       rt_mutex_unlock(&uid_lock);
        return count;
 }
 
@@ -251,7 +252,7 @@ static void update_io_stats_locked(void)
        struct io_stats *io_bucket, *io_curr, *io_last;
        unsigned long bkt;
 
-       BUG_ON(!mutex_is_locked(&uid_lock));
+       BUG_ON(!rt_mutex_is_locked(&uid_lock));
 
        hash_for_each(hash_table, bkt, uid_entry, hash)
                memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
@@ -293,7 +294,7 @@ static int uid_io_show(struct seq_file *m, void *v)
        struct uid_entry *uid_entry;
        unsigned long bkt;
 
-       mutex_lock(&uid_lock);
+       rt_mutex_lock(&uid_lock);
 
        update_io_stats_locked();
 
@@ -312,7 +313,7 @@ static int uid_io_show(struct seq_file *m, void *v)
                        uid_entry->io[UID_STATE_BACKGROUND].fsync);
        }
 
-       mutex_unlock(&uid_lock);
+       rt_mutex_unlock(&uid_lock);
 
        return 0;
 }
@@ -357,16 +358,16 @@ static ssize_t uid_procstat_write(struct file *file,
        if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
                return -EINVAL;
 
-       mutex_lock(&uid_lock);
+       rt_mutex_lock(&uid_lock);
 
        uid_entry = find_or_register_uid(uid);
        if (!uid_entry) {
-               mutex_unlock(&uid_lock);
+               rt_mutex_unlock(&uid_lock);
                return -EINVAL;
        }
 
        if (uid_entry->state == state) {
-               mutex_unlock(&uid_lock);
+               rt_mutex_unlock(&uid_lock);
                return count;
        }
 
@@ -374,7 +375,7 @@ static ssize_t uid_procstat_write(struct file *file,
 
        uid_entry->state = state;
 
-       mutex_unlock(&uid_lock);
+       rt_mutex_unlock(&uid_lock);
 
        return count;
 }
@@ -396,7 +397,7 @@ static int process_notifier(struct notifier_block *self,
        if (!task)
                return NOTIFY_OK;
 
-       mutex_lock(&uid_lock);
+       rt_mutex_lock(&uid_lock);
        uid = from_kuid_munged(current_user_ns(), task_uid(task));
        uid_entry = find_or_register_uid(uid);
        if (!uid_entry) {
@@ -414,7 +415,7 @@ static int process_notifier(struct notifier_block *self,
        clean_uid_io_last_stats(uid_entry, task);
 
 exit:
-       mutex_unlock(&uid_lock);
+       rt_mutex_unlock(&uid_lock);
        return NOTIFY_OK;
 }