[PATCH] rcu file: use atomic primitives
author: Nick Piggin <nickpiggin@yahoo.com.au>
Sun, 8 Jan 2006 09:02:19 +0000 (01:02 -0800)
committer: Linus Torvalds <torvalds@g5.osdl.org>
Mon, 9 Jan 2006 04:13:48 +0000 (20:13 -0800)
Use atomic_inc_not_zero for rcu files instead of special case rcuref.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Documentation/RCU/rcuref.txt
fs/aio.c
fs/file_table.c
include/linux/fs.h
include/linux/radix-tree.h
include/linux/rcuref.h [deleted file]
kernel/rcupdate.c
kernel/rcutorture.c
security/selinux/hooks.c

index a23fee66064df4ff6770e9be02f79af4a0397745..3f60db41b2f0fdce0f6944cf527b3594f2ff8e8e 100644 (file)
@@ -1,74 +1,67 @@
-Refcounter framework for elements of lists/arrays protected by
-RCU.
+Refcounter design for elements of lists/arrays protected by RCU.
 
 Refcounting on elements of  lists which are protected by traditional
 reader/writer spinlocks or semaphores are straight forward as in:
 
-1.                                     2.
-add()                                  search_and_reference()
-{                                      {
-       alloc_object                            read_lock(&list_lock);
-       ...                                     search_for_element
-       atomic_set(&el->rc, 1);                 atomic_inc(&el->rc);
-       write_lock(&list_lock);                 ...
-       add_element                             read_unlock(&list_lock);
-       ...                                     ...
-       write_unlock(&list_lock);       }
+1.                             2.
+add()                          search_and_reference()
+{                              {
+    alloc_object                   read_lock(&list_lock);
+    ...                                    search_for_element
+    atomic_set(&el->rc, 1);        atomic_inc(&el->rc);
+    write_lock(&list_lock);         ...
+    add_element                            read_unlock(&list_lock);
+    ...                                    ...
+    write_unlock(&list_lock);  }
 }
 
 3.                                     4.
 release_referenced()                   delete()
 {                                      {
-       ...                             write_lock(&list_lock);
-       atomic_dec(&el->rc, relfunc)    ...
-       ...                             delete_element
-}                                      write_unlock(&list_lock);
-                                       ...
-                                       if (atomic_dec_and_test(&el->rc))
-                                               kfree(el);
-                                       ...
+    ...                                            write_lock(&list_lock);
+    atomic_dec(&el->rc, relfunc)           ...
+    ...                                            delete_element
+}                                          write_unlock(&list_lock);
+                                           ...
+                                           if (atomic_dec_and_test(&el->rc))
+                                               kfree(el);
+                                           ...
                                        }
 
 If this list/array is made lock free using rcu as in changing the
 write_lock in add() and delete() to spin_lock and changing read_lock
-in search_and_reference to rcu_read_lock(), the rcuref_get in
+in search_and_reference to rcu_read_lock(), the atomic_inc in
 search_and_reference could potentially hold reference to an element which
-has already been deleted from the list/array.  rcuref_lf_get_rcu takes
+has already been deleted from the list/array.  atomic_inc_not_zero takes
 care of this scenario. search_and_reference should look as;
 
 1.                                     2.
 add()                                  search_and_reference()
 {                                      {
-       alloc_object                            rcu_read_lock();
-       ...                                     search_for_element
-       atomic_set(&el->rc, 1);                 if (rcuref_inc_lf(&el->rc)) {
-       write_lock(&list_lock);                         rcu_read_unlock();
-                                                       return FAIL;
-       add_element                             }
-       ...                                     ...
-       write_unlock(&list_lock);               rcu_read_unlock();
+    alloc_object                           rcu_read_lock();
+    ...                                            search_for_element
+    atomic_set(&el->rc, 1);                if (!atomic_inc_not_zero(&el->rc)) {
+    write_lock(&list_lock);                    rcu_read_unlock();
+                                               return FAIL;
+    add_element                                    }
+    ...                                            ...
+    write_unlock(&list_lock);              rcu_read_unlock();
 }                                      }
 3.                                     4.
 release_referenced()                   delete()
 {                                      {
-       ...                             write_lock(&list_lock);
-       rcuref_dec(&el->rc, relfunc)    ...
-       ...                             delete_element
-}                                      write_unlock(&list_lock);
-                                       ...
-                                       if (rcuref_dec_and_test(&el->rc))
-                                               call_rcu(&el->head, el_free);
-                                       ...
+    ...                                            write_lock(&list_lock);
+    atomic_dec(&el->rc, relfunc)           ...
+    ...                                            delete_element
+}                                          write_unlock(&list_lock);
+                                           ...
+                                           if (atomic_dec_and_test(&el->rc))
+                                               call_rcu(&el->head, el_free);
+                                           ...
                                        }
 
 Sometimes, reference to the element need to be obtained in the
-update (write) stream.  In such cases, rcuref_inc_lf might be an overkill
-since the spinlock serialising list updates are held. rcuref_inc
+update (write) stream.  In such cases, atomic_inc_not_zero might be an
+overkill since the spinlock serialising list updates are held. atomic_inc
 is to be used in such cases.
-For arches which do not have cmpxchg rcuref_inc_lf
-api uses a hashed spinlock implementation and the same hashed spinlock
-is acquired in all rcuref_xxx primitives to preserve atomicity.
-Note: Use rcuref_inc api only if you need to use rcuref_inc_lf on the
-refcounter atleast at one place.  Mixing rcuref_inc and atomic_xxx api
-might lead to races. rcuref_inc_lf() must be used in lockfree
-RCU critical sections only.
+
index 5a28b69ad223b243121c8eb65495f9a519891d33..aec2b1916d1b27e4bc9411212359801b8ba70386 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -29,7 +29,6 @@
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
-#include <linux/rcuref.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -514,7 +513,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
        /* Must be done under the lock to serialise against cancellation.
         * Call this aio_fput as it duplicates fput via the fput_work.
         */
-       if (unlikely(rcuref_dec_and_test(&req->ki_filp->f_count))) {
+       if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
                get_ioctx(ctx);
                spin_lock(&fput_lock);
                list_add(&req->ki_list, &fput_head);
index c3a5e2fd663b772d7eeb997171d6b052b82d680c..6142250104a6dbdc2acd0a5eca5ad7263105785b 100644 (file)
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(get_empty_filp);
 
 void fastcall fput(struct file *file)
 {
-       if (rcuref_dec_and_test(&file->f_count))
+       if (atomic_dec_and_test(&file->f_count))
                __fput(file);
 }
 
@@ -166,7 +166,7 @@ struct file fastcall *fget(unsigned int fd)
        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
-               if (!rcuref_inc_lf(&file->f_count)) {
+               if (!atomic_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
@@ -198,7 +198,7 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
-                       if (rcuref_inc_lf(&file->f_count))
+                       if (atomic_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
@@ -213,7 +213,7 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 
 void put_filp(struct file *file)
 {
-       if (rcuref_dec_and_test(&file->f_count)) {
+       if (atomic_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
index 2c9c48d65630ed68464f377f8e2578e88f2497bf..ef29500b5df8964a6e79a704ebab8ca55d6779eb 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/config.h>
 #include <linux/limits.h>
 #include <linux/ioctl.h>
-#include <linux/rcuref.h>
 
 /*
  * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -653,7 +652,7 @@ extern spinlock_t files_lock;
 #define file_list_lock() spin_lock(&files_lock);
 #define file_list_unlock() spin_unlock(&files_lock);
 
-#define get_file(x)    rcuref_inc(&(x)->f_count)
+#define get_file(x)    atomic_inc(&(x)->f_count)
 #define file_count(x)  atomic_read(&(x)->f_count)
 
 #define        MAX_NON_LFS     ((1UL<<31) - 1)
index 36e5d269612fec7ce193f2edee5fd816af353650..c57ff2fcb30a8d27990a2d4f5acc516714203b99 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef _LINUX_RADIX_TREE_H
 #define _LINUX_RADIX_TREE_H
 
+#include <linux/sched.h>
 #include <linux/preempt.h>
 #include <linux/types.h>
 
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
deleted file mode 100644 (file)
index e1adbba..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * rcuref.h
- *
- * Reference counting for elements of lists/arrays protected by
- * RCU.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2005
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *        Ravikiran Thirumalai <kiran_th@gmail.com>
- *
- * See Documentation/RCU/rcuref.txt for detailed user guide.
- *
- */
-
-#ifndef _RCUREF_H_
-#define _RCUREF_H_
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * These APIs work on traditional atomic_t counters used in the
- * kernel for reference counting. Under special circumstances
- * where a lock-free get() operation races with a put() operation
- * these APIs can be used. See Documentation/RCU/rcuref.txt.
- */
-
-#ifdef __HAVE_ARCH_CMPXCHG
-
-/**
- * rcuref_inc - increment refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference
- * in a lock-free reader-side critical section.
- */
-static inline void rcuref_inc(atomic_t *rcuref)
-{
-       atomic_inc(rcuref);
-}
-
-/**
- * rcuref_dec - decrement refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference
- * in a lock-free reader-side critical section.
- */
-static inline void rcuref_dec(atomic_t *rcuref)
-{
-       atomic_dec(rcuref);
-}
-
-/**
- * rcuref_dec_and_test - decrement refcount for object and test
- * @rcuref: reference counter in the object.
- * @release: pointer to the function that will clean up the object
- *          when the last reference to the object is released.
- *          This pointer is required.
- *
- * Decrement the refcount, and if 0, return 1. Else return 0.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference
- * in a lock-free reader-side critical section.
- */
-static inline int rcuref_dec_and_test(atomic_t *rcuref)
-{
-       return atomic_dec_and_test(rcuref);
-}
-
-/*
- * cmpxchg is needed on UP too, if deletions to the list/array can happen
- * in interrupt context.
- */
-
-/**
- * rcuref_inc_lf - Take reference to an object in a read-side
- * critical section protected by RCU.
- * @rcuref: reference counter in the object in question.
- *
- * Try and increment the refcount by 1.  The increment might fail if
- * the reference counter has been through a 1 to 0 transition and
- * is no longer part of the lock-free list.
- * Returns non-zero on successful increment and zero otherwise.
- */
-static inline int rcuref_inc_lf(atomic_t *rcuref)
-{
-       int c, old;
-       c = atomic_read(rcuref);
-       while (c && (old = cmpxchg(&rcuref->counter, c, c + 1)) != c)
-               c = old;
-       return c;
-}
-
-#else                          /* !__HAVE_ARCH_CMPXCHG */
-
-extern spinlock_t __rcuref_hash[];
-
-/*
- * Use a hash table of locks to protect the reference count
- * since cmpxchg is not available in this arch.
- */
-#ifdef CONFIG_SMP
-#define RCUREF_HASH_SIZE       4
-#define RCUREF_HASH(k) \
-       (&__rcuref_hash[(((unsigned long)k)>>8) & (RCUREF_HASH_SIZE-1)])
-#else
-#define        RCUREF_HASH_SIZE        1
-#define RCUREF_HASH(k)         &__rcuref_hash[0]
-#endif                         /* CONFIG_SMP */
-
-/**
- * rcuref_inc - increment refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference in a lock-free
- * reader-side critical section.
- */
-static inline void rcuref_inc(atomic_t *rcuref)
-{
-       unsigned long flags;
-       spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
-       rcuref->counter += 1;
-       spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-}
-
-/**
- * rcuref_dec - decrement refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference in a lock-free
- * reader-side critical section.
- */
-static inline void rcuref_dec(atomic_t *rcuref)
-{
-       unsigned long flags;
-       spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
-       rcuref->counter -= 1;
-       spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-}
-
-/**
- * rcuref_dec_and_test - decrement refcount for object and test
- * @rcuref: reference counter in the object.
- * @release: pointer to the function that will clean up the object
- *          when the last reference to the object is released.
- *          This pointer is required.
- *
- * Decrement the refcount, and if 0, return 1. Else return 0.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference in a lock-free
- * reader-side critical section.
- */
-static inline int rcuref_dec_and_test(atomic_t *rcuref)
-{
-       unsigned long flags;
-       spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
-       rcuref->counter--;
-       if (!rcuref->counter) {
-               spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-               return 1;
-       } else {
-               spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-               return 0;
-       }
-}
-
-/**
- * rcuref_inc_lf - Take reference to an object of a lock-free collection
- * by traversing a lock-free list/array.
- * @rcuref: reference counter in the object in question.
- *
- * Try and increment the refcount by 1.  The increment might fail if
- * the reference counter has been through a 1 to 0 transition and
- * object is no longer part of the lock-free list.
- * Returns non-zero on successful increment and zero otherwise.
- */
-static inline int rcuref_inc_lf(atomic_t *rcuref)
-{
-       int ret;
-       unsigned long flags;
-       spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
-       if (rcuref->counter)
-               ret = rcuref->counter++;
-       else
-               ret = 0;
-       spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-       return ret;
-}
-
-
-#endif /* !__HAVE_ARCH_CMPXCHG */
-
-#endif /* __KERNEL__ */
-#endif /* _RCUREF_H_ */
index 0a669bd2f6d106724dafef8b398acf10ed5d01ae..30b0bba0385978b0220b562f33ce4fb5468293e3 100644 (file)
@@ -46,7 +46,6 @@
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/rcupdate.h>
-#include <linux/rcuref.h>
 #include <linux/cpu.h>
 
 /* Definition for rcupdate control block. */
@@ -74,19 +73,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
 static int maxbatch = 10000;
 
-#ifndef __HAVE_ARCH_CMPXCHG
-/*
- * We use an array of spinlocks for the rcurefs -- similar to ones in sparc
- * 32 bit atomic_t implementations, and a hash function similar to that
- * for our refcounting needs.
- * Can't help multiprocessors which donot have cmpxchg :(
- */
-
-spinlock_t __rcuref_hash[RCUREF_HASH_SIZE] = {
-       [0 ... (RCUREF_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
-};
-#endif
-
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
index 36efe088ad8197a491eda62c3fee90057efed657..75174c81529a66a10be6b932222d8500d8c41625 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
-#include <linux/rcuref.h>
 #include <linux/cpu.h>
 #include <linux/random.h>
 #include <linux/delay.h>
index 3d496eae1b47ee1ce6a5950f2ffe770c495da705..6647204e46366f59917f4779949988e0ad5912c1 100644 (file)
@@ -1663,7 +1663,7 @@ static inline void flush_unauthorized_files(struct files_struct * files)
                                                continue;
                                        }
                                        if (devnull) {
-                                               rcuref_inc(&devnull->f_count);
+                                               get_file(devnull);
                                        } else {
                                                devnull = dentry_open(dget(selinux_null), mntget(selinuxfs_mount), O_RDWR);
                                                if (!devnull) {