ima: replace iint spinlock with rwlock/read_lock
author     Dmitry Kasatkin <dmitry.kasatkin@intel.com>
           Wed, 8 Feb 2012 19:15:42 +0000 (14:15 -0500)
committer  Mimi Zohar <zohar@linux.vnet.ibm.com>
           Fri, 7 Sep 2012 18:57:46 +0000 (14:57 -0400)
For performance, replace the iint spinlock with rwlock/read_lock.

Eric Paris questioned this change from spinlocks to rwlocks, saying
"rwlocks have been shown to actually be slower on multi processor
systems in a number of cases due to the cache line bouncing required."

Based on performance measurements of kernel compilation after a cold
boot, with multiple jobs, with and without this patch, Dmitry Kasatkin
and I found that rwlocks did perform better than spinlocks, though only
marginally: with a total compilation time of around 6 minutes, the
rwlock version was consistently 1 - 3 seconds faster.
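
To illustrate the intended concurrency win, here is a minimal userspace
C sketch of the same pattern using POSIX rwlocks: lookups take the lock
shared and may run in parallel, while updates take it exclusively. The
names tree_lock, tree_lookup, and tree_insert are hypothetical stand-ins
for integrity_iint_lock and the rb-tree operations below, not part of
this patch (compile with: cc -pthread):

  #include <pthread.h>
  #include <stddef.h>

  struct node {
          long key;
          struct node *next;
  };

  /* Stand-in for integrity_iint_lock: lookups share it, updates own it. */
  static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
  static struct node *tree_head;  /* stand-in for the rb-tree root */

  /* Like integrity_iint_find(): many lookups may hold the read lock at once. */
  static struct node *tree_lookup(long key)
  {
          struct node *n, *found = NULL;

          pthread_rwlock_rdlock(&tree_lock);
          for (n = tree_head; n; n = n->next)
                  if (n->key == key) {
                          found = n;
                          break;
                  }
          pthread_rwlock_unlock(&tree_lock);
          return found;
  }

  /* Like integrity_inode_get(): insertion takes the lock exclusively. */
  static void tree_insert(struct node *n)
  {
          pthread_rwlock_wrlock(&tree_lock);
          n->next = tree_head;
          tree_head = n;
          pthread_rwlock_unlock(&tree_lock);
  }

  int main(void)
  {
          static struct node a = { .key = 42 };

          tree_insert(&a);
          return tree_lookup(42) ? 0 : 1;
  }

Note that Eric's cache-line-bouncing concern applies to this sketch as
well: acquiring a read lock still writes the lock word, so readers only
benefit when lookups are frequent relative to updates, which matches the
marginal numbers above.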

Changelog v2:
- new patch taken from the 'allocating iint improvements' patch

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Signed-off-by: Mimi Zohar <zohar@us.ibm.com>
security/integrity/iint.c

index c91a436e13ac62f14beb5611a7b68a82bb0bc569..d82a5a13d8551ca1b857a13c401f706340b88850 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -22,7 +22,7 @@
 #include "integrity.h"
 
 static struct rb_root integrity_iint_tree = RB_ROOT;
-static DEFINE_SPINLOCK(integrity_iint_lock);
+static DEFINE_RWLOCK(integrity_iint_lock);
 static struct kmem_cache *iint_cache __read_mostly;
 
 int iint_initialized;
@@ -35,8 +35,6 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
        struct integrity_iint_cache *iint;
        struct rb_node *n = integrity_iint_tree.rb_node;
 
-       assert_spin_locked(&integrity_iint_lock);
-
        while (n) {
                iint = rb_entry(n, struct integrity_iint_cache, rb_node);
 
@@ -63,9 +61,9 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
        if (!IS_IMA(inode))
                return NULL;
 
-       spin_lock(&integrity_iint_lock);
+       read_lock(&integrity_iint_lock);
        iint = __integrity_iint_find(inode);
-       spin_unlock(&integrity_iint_lock);
+       read_unlock(&integrity_iint_lock);
 
        return iint;
 }
@@ -100,7 +98,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
        if (!iint)
                return NULL;
 
-       spin_lock(&integrity_iint_lock);
+       write_lock(&integrity_iint_lock);
 
        p = &integrity_iint_tree.rb_node;
        while (*p) {
@@ -119,7 +117,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
        rb_link_node(node, parent, p);
        rb_insert_color(node, &integrity_iint_tree);
 
-       spin_unlock(&integrity_iint_lock);
+       write_unlock(&integrity_iint_lock);
        return iint;
 }
 
@@ -136,10 +134,10 @@ void integrity_inode_free(struct inode *inode)
        if (!IS_IMA(inode))
                return;
 
-       spin_lock(&integrity_iint_lock);
+       write_lock(&integrity_iint_lock);
        iint = __integrity_iint_find(inode);
        rb_erase(&iint->rb_node, &integrity_iint_tree);
-       spin_unlock(&integrity_iint_lock);
+       write_unlock(&integrity_iint_lock);
 
        iint_free(iint);
 }