shm: optimize locking and ipc_namespace getting
author Vasiliy Kulikov <segoon@openwall.com>
Thu, 28 Jul 2011 23:56:40 +0000 (03:56 +0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 30 Jul 2011 18:44:20 +0000 (08:44 -1000)
shm_lock() does a lookup of the shm segment in shm_ids(ns).ipcs_idr,
which is redundant when we already know the shmid_kernel address.
Taking the actual lock is also not required for reads until we really
want to destroy the segment.
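
For reference, the path shm_lock() currently takes via ipc_lock() looks
roughly like this (paraphrased and simplified from ipc/util.c of this
era); it is this idr_find() plus revalidation that becomes unnecessary
once we already hold the shmid_kernel pointer:

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);	/* lookup we can skip */
	if (out == NULL) {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&out->lock);

	/* ipc_rmid() may have freed the ID while we were spinning */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	return out;
}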

exit_shm() and shm_destroy_orphaned() can avoid the idr loop entirely
by first checking whether there is at least one segment in the current
ipc_namespace.
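
The "at least one segment" check uses the object count that ipc_ids
already maintains.  As a reminder, the relevant bookkeeping looks
roughly like this (abridged from include/linux/ipc_namespace.h of this
era):

struct ipc_ids {
	int in_use;			/* number of live ipc objects */
	unsigned short seq;
	unsigned short seq_max;
	struct rw_semaphore rw_mutex;	/* taken by the callers below */
	struct idr ipcs_idr;		/* walked by idr_for_each() */
};

With rw_mutex held for writing, in_use == 0 means there is nothing for
idr_for_each() to visit, so the walk can be skipped.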

The NULL checks of nsproxy and ipc_ns are redundant: exit_shm() is
called from do_exit() before the call to exit_notify(), so dereferencing
current->nsproxy->ipc_ns is guaranteed to be safe.
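
To make the ordering concrete, here is a heavily abridged sketch of
do_exit() as it looks with this series applied ("..." marks omitted
code, exact intermediate calls not shown):

void do_exit(long code)
{
	...
	exit_sem(tsk);
	exit_shm(tsk);		/* nsproxy and ipc_ns are still valid here */
	exit_files(tsk);
	exit_fs(tsk);
	...
	exit_notify(tsk, group_dead);	/* nsproxy is only dropped after this */
	...
}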

Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
ipc/shm.c

index fdaf8be65b759369482bb5aaa477346bed8741af..9fb044f3b345e5e61ea072e255ab3b67b5ce3539 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -131,6 +131,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
        return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
+static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
+{
+       rcu_read_lock();
+       spin_lock(&ipcp->shm_perm.lock);
+}
+
 static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
 {
@@ -231,18 +237,15 @@ static void shm_close(struct vm_area_struct *vma)
        up_write(&shm_ids(ns).rw_mutex);
 }
 
+/* Called with shm_ids(ns).rw_mutex locked */
 static int shm_try_destroy_current(int id, void *p, void *data)
 {
        struct ipc_namespace *ns = data;
-       struct shmid_kernel *shp = shm_lock(ns, id);
+       struct kern_ipc_perm *ipcp = p;
+       struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 
-       if (IS_ERR(shp))
-               return 0;
-
-       if (shp->shm_creator != current) {
-               shm_unlock(shp);
+       if (shp->shm_creator != current)
                return 0;
-       }
 
        /*
         * Mark it as orphaned to destroy the segment when
@@ -255,64 +258,56 @@ static int shm_try_destroy_current(int id, void *p, void *data)
         * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
         * is not set, it shouldn't be deleted here.
         */
-       if (!ns->shm_rmid_forced) {
-               shm_unlock(shp);
+       if (!ns->shm_rmid_forced)
                return 0;
-       }
 
-       if (shm_may_destroy(ns, shp))
+       if (shm_may_destroy(ns, shp)) {
+               shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
-       else
-               shm_unlock(shp);
+       }
        return 0;
 }
 
+/* Called with shm_ids(ns).rw_mutex locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
        struct ipc_namespace *ns = data;
-       struct shmid_kernel *shp = shm_lock(ns, id);
-
-       if (IS_ERR(shp))
-               return 0;
+       struct kern_ipc_perm *ipcp = p;
+       struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 
        /*
         * We want to destroy segments without users and with already
         * exit'ed originating process.
+        *
+        * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
         */
-       if (shp->shm_creator != NULL) {
-               shm_unlock(shp);
+       if (shp->shm_creator != NULL)
                return 0;
-       }
 
-       if (shm_may_destroy(ns, shp))
+       if (shm_may_destroy(ns, shp)) {
+               shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
-       else
-               shm_unlock(shp);
+       }
        return 0;
 }
 
 void shm_destroy_orphaned(struct ipc_namespace *ns)
 {
        down_write(&shm_ids(ns).rw_mutex);
-       idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
+       if (shm_ids(ns).in_use)
+               idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rw_mutex);
 }
 
 
 void exit_shm(struct task_struct *task)
 {
-       struct nsproxy *nsp = task->nsproxy;
-       struct ipc_namespace *ns;
-
-       if (!nsp)
-               return;
-       ns = nsp->ipc_ns;
-       if (!ns)
-               return;
+       struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 
        /* Destroy all already created segments, but not mapped yet */
        down_write(&shm_ids(ns).rw_mutex);
-       idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
+       if (shm_ids(ns).in_use)
+               idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
        up_write(&shm_ids(ns).rw_mutex);
 }