rhashtable: Move hash_rnd into bucket_table
author: Herbert Xu <herbert@gondor.apana.org.au>
Mon, 9 Mar 2015 22:27:55 +0000 (09:27 +1100)
committer: David S. Miller <davem@davemloft.net>
Wed, 11 Mar 2015 20:28:25 +0000 (16:28 -0400)
Currently hash_rnd is a parameter that users can set.  However,
no existing users set this parameter.  It is also something that
people are unlikely to want to set directly since it's just a
random number.

In preparation for allowing the reseeding/rehashing of rhashtable,
this patch moves hash_rnd into bucket_table so that it's now an
internal state rather than a parameter.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/rhashtable.h
lib/rhashtable.c

index d438eeb08bff407043b32d5f52f58d08fac8838f..5ef8ea551556f1bba6ed3e0e5f52a9b1022770d6 100644 (file)
@@ -49,12 +49,14 @@ struct rhash_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
+ * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @buckets: size * hash buckets
  */
 struct bucket_table {
        size_t                  size;
+       u32                     hash_rnd;
        unsigned int            locks_mask;
        spinlock_t              *locks;
 
@@ -72,7 +74,6 @@ struct rhashtable;
  * @key_len: Length of key
  * @key_offset: Offset of key in struct to be hashed
  * @head_offset: Offset of rhash_head in struct to be hashed
- * @hash_rnd: Seed to use while hashing
  * @max_shift: Maximum number of shifts while expanding
  * @min_shift: Minimum number of shifts while shrinking
  * @nulls_base: Base value to generate nulls marker
@@ -85,7 +86,6 @@ struct rhashtable_params {
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
-       u32                     hash_rnd;
        size_t                  max_shift;
        size_t                  min_shift;
        u32                     nulls_base;
index b5344ef4c6846c4f9256c1d0d418f774284c8fcc..ba15dceee27fde3f04a8dbf6b6e59db4662549d8 100644 (file)
@@ -66,25 +66,28 @@ static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
        return hash & (tbl->size - 1);
 }
 
-static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
+static u32 obj_raw_hashfn(struct rhashtable *ht, const void *ptr)
 {
+       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
        u32 hash;
 
        if (unlikely(!ht->p.key_len))
-               hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+               hash = ht->p.obj_hashfn(ptr, tbl->hash_rnd);
        else
                hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
-                                   ht->p.hash_rnd);
+                                   tbl->hash_rnd);
 
        return hash >> HASH_RESERVED_SPACE;
 }
 
 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
 {
-       return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
+       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       return ht->p.hashfn(key, len, tbl->hash_rnd) >> HASH_RESERVED_SPACE;
 }
 
-static u32 head_hashfn(const struct rhashtable *ht,
+static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
 {
@@ -92,7 +95,7 @@ static u32 head_hashfn(const struct rhashtable *ht,
 }
 
 #ifdef CONFIG_PROVE_LOCKING
-static void debug_dump_buckets(const struct rhashtable *ht,
+static void debug_dump_buckets(struct rhashtable *ht,
                               const struct bucket_table *tbl)
 {
        struct rhash_head *he;
@@ -385,6 +388,8 @@ int rhashtable_expand(struct rhashtable *ht)
        if (new_tbl == NULL)
                return -ENOMEM;
 
+       new_tbl->hash_rnd = old_tbl->hash_rnd;
+
        atomic_inc(&ht->shift);
 
        /* Make insertions go into the new, empty table right away. Deletions
@@ -476,6 +481,8 @@ int rhashtable_shrink(struct rhashtable *ht)
        if (new_tbl == NULL)
                return -ENOMEM;
 
+       new_tbl->hash_rnd = tbl->hash_rnd;
+
        rcu_assign_pointer(ht->future_tbl, new_tbl);
        synchronize_rcu();
 
@@ -1099,14 +1106,13 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
        if (tbl == NULL)
                return -ENOMEM;
 
+       get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+
        atomic_set(&ht->nelems, 0);
        atomic_set(&ht->shift, ilog2(tbl->size));
        RCU_INIT_POINTER(ht->tbl, tbl);
        RCU_INIT_POINTER(ht->future_tbl, tbl);
 
-       if (!ht->p.hash_rnd)
-               get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
-
        INIT_WORK(&ht->run_work, rht_deferred_worker);
 
        return 0;