sunrpc/cache: change deferred-request hash table to use hlist.
authorNeilBrown <neilb@suse.de>
Thu, 12 Aug 2010 07:04:08 +0000 (17:04 +1000)
committerJ. Bruce Fields <bfields@redhat.com>
Wed, 22 Sep 2010 01:51:11 +0000 (21:51 -0400)
Being a hash table, hlist is the best option.

There is currently some ugliness where we treat "->next == NULL" as
a special case to avoid having to initialise the whole array.
This change nicely gets rid of that case.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
include/linux/sunrpc/cache.h
net/sunrpc/cache.c

index 52a7d7224e904a5d5e55a9439eaae08c29001eec..03496357f45548107bae1c1a0dc785dd241504ad 100644 (file)
@@ -133,7 +133,7 @@ struct cache_req {
  * delayed awaiting cache-fill
  */
 struct cache_deferred_req {
-       struct list_head        hash;   /* on hash chain */
+       struct hlist_node       hash;   /* on hash chain */
        struct list_head        recent; /* on fifo */
        struct cache_head       *item;  /* cache item we wait on */
        void                    *owner; /* we might need to discard all defered requests
index ca7c621cd97566512a072ba58a3ca5d8da4110ad..2a840519405633ec2ee69f8d9c825e64c7eba406 100644 (file)
@@ -506,13 +506,13 @@ EXPORT_SYMBOL_GPL(cache_purge);
 
 static DEFINE_SPINLOCK(cache_defer_lock);
 static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 {
        list_del_init(&dreq->recent);
-       list_del_init(&dreq->hash);
+       hlist_del_init(&dreq->hash);
        cache_defer_cnt--;
 }
 
@@ -521,9 +521,7 @@ static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_he
        int hash = DFR_HASH(item);
 
        list_add(&dreq->recent, &cache_defer_list);
-       if (cache_defer_hash[hash].next == NULL)
-               INIT_LIST_HEAD(&cache_defer_hash[hash]);
-       list_add(&dreq->hash, &cache_defer_hash[hash]);
+       hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 }
 
 static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
@@ -588,7 +586,7 @@ static int cache_wait_req(struct cache_req *req, struct cache_head *item)
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
-               if (!list_empty(&sleeper.handle.hash)) {
+               if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
@@ -642,24 +640,18 @@ static void cache_revisit_request(struct cache_head *item)
 {
        struct cache_deferred_req *dreq;
        struct list_head pending;
-
-       struct list_head *lp;
+       struct hlist_node *lp, *tmp;
        int hash = DFR_HASH(item);
 
        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);
 
-       lp = cache_defer_hash[hash].next;
-       if (lp) {
-               while (lp != &cache_defer_hash[hash]) {
-                       dreq = list_entry(lp, struct cache_deferred_req, hash);
-                       lp = lp->next;
-                       if (dreq->item == item) {
-                               __unhash_deferred_req(dreq);
-                               list_add(&dreq->recent, &pending);
-                       }
+       hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+               if (dreq->item == item) {
+                       __unhash_deferred_req(dreq);
+                       list_add(&dreq->recent, &pending);
                }
-       }
+
        spin_unlock(&cache_defer_lock);
 
        while (!list_empty(&pending)) {