Reduce cpuset.c write_lock_irq() to read_lock()
author     Paul Menage <menage@google.com>
           Mon, 16 Jul 2007 06:40:11 +0000 (23:40 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Mon, 16 Jul 2007 16:05:43 +0000 (09:05 -0700)
cpuset.c:update_nodemask() takes a write_lock_irq() on tasklist_lock to
block concurrent forks while it counts the cpuset's tasks and collects
their mm pointers.  A read_lock() suffices and is less intrusive: fork()
takes tasklist_lock for writing, so holding it for read already excludes
forks, while other readers can still proceed and interrupts need not be
disabled.
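
For reference, a minimal userspace sketch of the same reader/writer
interaction, using pthread_rwlock as a stand-in for the kernel's
rwlock_t (the forker/list_lock names are illustrative only, not from
the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static int ntasks = 4;                  /* stand-in for cs->count */

/* Writer side: stands in for fork() linking a new task. */
static void *forker(void *arg)
{
        (void)arg;
        pthread_rwlock_wrlock(&list_lock); /* blocks while any read lock is held */
        ntasks++;
        pthread_rwlock_unlock(&list_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_rwlock_rdlock(&list_lock); /* "block fork": excludes the writer */
        pthread_create(&t, NULL, forker, NULL);
        /* forker() now stalls on wrlock, so the count is stable here */
        printf("stable count: %d\n", ntasks);
        pthread_rwlock_unlock(&list_lock);

        pthread_join(t, NULL);
        printf("after fork: %d\n", ntasks);
        return 0;
}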

Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4c49188cc49b67d6f172c5d7d12743109c1f9e3e..824b1c01f4107667abb7d0e4e8923678a15966c4 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -981,10 +981,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
                mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
                if (!mmarray)
                        goto done;
-               write_lock_irq(&tasklist_lock);         /* block fork */
+               read_lock(&tasklist_lock);              /* block fork */
                if (atomic_read(&cs->count) <= ntasks)
                        break;                          /* got enough */
-               write_unlock_irq(&tasklist_lock);       /* try again */
+               read_unlock(&tasklist_lock);            /* try again */
                kfree(mmarray);
        }
 
@@ -1006,7 +1006,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
                        continue;
                mmarray[n++] = mm;
        } while_each_thread(g, p);
-       write_unlock_irq(&tasklist_lock);
+       read_unlock(&tasklist_lock);
 
        /*
         * Now that we've dropped the tasklist spinlock, we can