mm, oom_adj: don't loop through tasks in __set_oom_adj when not necessary
author	Suren Baghdasaryan <surenb@google.com>
	Tue, 13 Oct 2020 23:58:35 +0000 (16:58 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Thu, 29 Oct 2020 08:07:08 +0000 (09:07 +0100)
[ Upstream commit 67197a4f28d28d0b073ab0427b03cb2ee5382578 ]

Currently __set_oom_adj loops through all processes in the system to keep
oom_score_adj and oom_score_adj_min in sync between processes sharing
their mm.  This is done for any task whose mm_users count is greater than
one, which includes processes with multiple threads (sharing mm and
signals).  However, for such processes the loop is unnecessary because
their signal structure is shared as well.

Android updates oom_score_adj whenever a task changes its role
(background/foreground/...) or binds to/unbinds from a service, making it
more/less important.  Such operations can happen frequently.  We noticed
that updates to oom_score_adj became more expensive and, after further
investigation, found out that the patch mentioned in "Fixes" introduced a
regression.  Using Pixel 4 with a typical Android workload, write time to
oom_score_adj increased from ~3.57us to ~362us.  Moreover, this regression
depends linearly on the number of multi-threaded processes running on the
system.
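
The operation whose latency regressed is a plain write to the procfs
file; a minimal userspace helper shows the path being measured (error
handling trimmed, the helper name is illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Each write to /proc/<pid>/oom_score_adj ends up in __set_oom_adj(). */
	static int set_oom_score_adj(pid_t pid, int adj)
	{
		char path[64], buf[16];
		int fd, len, ret;

		snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", pid);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		len = snprintf(buf, sizeof(buf), "%d", adj);
		ret = (write(fd, buf, len) == len) ? 0 : -1;
		close(fd);
		return ret;
	}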

Mark the mm with a new MMF_MULTIPROCESS flag bit when a task is created
with (CLONE_VM && !CLONE_THREAD && !CLONE_VFORK).  Change __set_oom_adj to
use MMF_MULTIPROCESS instead of mm_users to decide whether the
oom_score_adj update should be synchronized between multiple processes.
To prevent races between clone() and __set_oom_adj(), when the
oom_score_adj of the process being cloned might be modified from
userspace, we use oom_adj_mutex, whose scope is changed from a local
static in __set_oom_adj to global.
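
The flag combination translates into a single mask test in the new
copy_oom_score_adj() helper (see the kernel/fork.c hunk below); roughly:

	/*
	 * Of the three flags, exactly CLONE_VM must be set: a new process
	 * sharing the mm but not the signal struct. Threads (CLONE_THREAD),
	 * vfork children (CLONE_VFORK) and ordinary forks with a private mm
	 * (no CLONE_VM) are all filtered out here.
	 */
	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
		return;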

The combination of (CLONE_VM && !CLONE_THREAD) is rarely used except for
the case of vfork().  To prevent performance regressions of vfork(), we
skip taking oom_adj_mutex and setting MMF_MULTIPROCESS when CLONE_VFORK is
specified.  Clearing the MMF_MULTIPROCESS flag (when the last process
sharing the mm exits) is left out of this patch to keep it simple and
because it is believed that this threading model is rare.  Should there
ever be a need for optimizing that case as well, it can be done by hooking
into the exit path, likely following the mm_update_next_owner pattern.

With the combination of (CLONE_VM && !CLONE_THREAD && !CLONE_VFORK) being
quite rare, the regression is gone after the change is applied.

[surenb@google.com: v3]
Link: https://lkml.kernel.org/r/20200902012558.2335613-1-surenb@google.com
Fixes: 44a70adec910 ("mm, oom_adj: make sure processes sharing mm have same view of oom_score_adj")
Reported-by: Tim Murray <timmurray@google.com>
Suggested-by: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Eugene Syromiatnikov <esyr@redhat.com>
Cc: Christian Kellner <christian@kellner.me>
Cc: Adrian Reber <areber@redhat.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Aleksa Sarai <cyphar@cyphar.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Alexey Gladkov <gladkov.alexey@gmail.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Bernd Edlinger <bernd.edlinger@hotmail.de>
Cc: John Johansen <john.johansen@canonical.com>
Cc: Yafang Shao <laoar.shao@gmail.com>
Link: https://lkml.kernel.org/r/20200824153036.3201505-1-surenb@google.com
Debugged-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/proc/base.c
include/linux/oom.h
include/linux/sched/coredump.h
kernel/fork.c
mm/oom_kill.c

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 64695dcf89f3bbcd89f47902bf32cba7404d9bbf..eeb81d9648c672bfea4f4233fe7b51642b4b5a2c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1041,7 +1041,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
 
 static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 {
-       static DEFINE_MUTEX(oom_adj_mutex);
        struct mm_struct *mm = NULL;
        struct task_struct *task;
        int err = 0;
@@ -1081,7 +1080,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
                struct task_struct *p = find_lock_task_mm(task);
 
                if (p) {
-                       if (atomic_read(&p->mm->mm_users) > 1) {
+                       if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
                                mm = p->mm;
                                mmgrab(mm);
                        }
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6adac113e96d29b5059ed65ac0522237c9b94388..c84597595cb413d53f8628a72edf2d9596f936c0 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -45,6 +45,7 @@ struct oom_control {
 };
 
 extern struct mutex oom_lock;
+extern struct mutex oom_adj_mutex;
 
 static inline void set_current_oom_origin(void)
 {
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ecdc6542070f11a01fc99665dc6489af59eebef0..dfd82eab29025d997221eb91e6a425fe3fe85376 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_DISABLE_THP                24      /* disable THP for all VMAs */
 #define MMF_OOM_VICTIM         25      /* mm is the oom victim */
 #define MMF_OOM_REAP_QUEUED    26      /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS       27      /* mm is shared between processes */
 #define MMF_DISABLE_THP_MASK   (1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/kernel/fork.c b/kernel/fork.c
index 0a328cf0cb136228837ccc4242fb255fe8c2a650..535aeb7ca145c0dc3a1d069f1986c81a296b0442 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1544,6 +1544,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
                free_task(tsk);
 }
 
+static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
+{
+       /* Skip if kernel thread */
+       if (!tsk->mm)
+               return;
+
+       /* Skip if spawning a thread or using vfork */
+       if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
+               return;
+
+       /* We need to synchronize with __set_oom_adj */
+       mutex_lock(&oom_adj_mutex);
+       set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
+       /* Update the values in case they were changed after copy_signal */
+       tsk->signal->oom_score_adj = current->signal->oom_score_adj;
+       tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
+       mutex_unlock(&oom_adj_mutex);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1952,6 +1971,8 @@ static __latent_entropy struct task_struct *copy_process(
        trace_task_newtask(p, clone_flags);
        uprobe_copy_process(p, clone_flags);
 
+       copy_oom_score_adj(clone_flags, p);
+
        return p;
 
 bad_fork_cancel_cgroup:
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 7a5c0b229c6ae17fbf9b448952c89def3ca97e4c..6482d743c5c88d3df2505a54383a7bb513d2321b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -53,6 +53,8 @@ int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
 
 DEFINE_MUTEX(oom_lock);
+/* Serializes oom_score_adj and oom_score_adj_min updates */
+DEFINE_MUTEX(oom_adj_mutex);
 
 #ifdef CONFIG_NUMA
 /**