There is in fact no need to wait for an RCU grace period in the
rehash function, since all insertions are guaranteed to go into
the new table through spin locks.
This patch uses call_rcu to free the old/rehashed table at our
leisure.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
* @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
* @buckets: size * hash buckets
*/
struct bucket_table {
unsigned int locks_mask;
spinlock_t *locks;
struct list_head walkers;
+ struct rcu_head rcu;
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
kvfree(tbl);
}
+/* RCU callback: frees the detached old bucket table once a grace period
+ * has elapsed, so no reader can still hold a reference to it.
+ */
+static void bucket_table_free_rcu(struct rcu_head *head)
+{
+	bucket_table_free(container_of(head, struct bucket_table, rcu));
+}
+
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
size_t nbuckets)
{
* table, and thus no references to the old table will
* remain.
*/
- synchronize_rcu();
-
- bucket_table_free(old_tbl);
+ call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
}
/**