* which depends on the requirements of the user
* - some extra bytes (the caller can request them when creating the hash)
*/
-typedef struct cfs_hash_bucket {
+struct cfs_hash_bucket {
union cfs_hash_lock hsb_lock; /**< bucket lock */
__u32 hsb_count; /**< current entries */
__u32 hsb_version; /**< change version */
unsigned int hsb_index; /**< index of bucket */
int hsb_depmax; /**< max depth on bucket */
long hsb_head[0]; /**< hash-head array */
-} cfs_hash_bucket_t;
+};
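The hsb_head[0] tail is the old zero-length-array idiom for a variable-size
member (modern C would spell it hsb_head[]): the per-bucket list heads sit
directly after the fixed header, and their width depends on the hash flavor.
A minimal sketch of locating the i-th head; the helper name is hypothetical,
and the real code resolves heads through hs_hops:

/* Hypothetical: address of the i-th hash-head in a bucket, assuming each
 * head occupies head_size bytes (cf. hop_hhead_size() below). */
static inline void *
bucket_hhead_sketch(struct cfs_hash_bucket *bkt, int head_size, int i)
{
	return (char *)bkt->hsb_head + (size_t)head_size * i;
}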
/**
* cfs_hash bucket descriptor; it normally lives on the caller's stack
*/
typedef struct cfs_hash_bd {
- cfs_hash_bucket_t *bd_bucket; /**< address of bucket */
+ struct cfs_hash_bucket *bd_bucket; /**< address of bucket */
unsigned int bd_offset; /**< offset in bucket */
} cfs_hash_bd_t;
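The descriptor is just (bucket, offset) and is meant to be copied by value on
the caller's stack. A hedged sketch of how a global hash index splits into
the two fields, consistent with cfs_hash_bd_from_key() further down (bits and
bkt_bits stand in for hs->hs_cur_bits and hs->hs_bkt_bits):

/* Sketch only: the low bits of the index select the bucket, the remaining
 * high bits select the hash-head inside that bucket. */
static inline void
bd_fill_sketch(struct cfs_hash_bucket **bkts, unsigned int index,
	       unsigned int bits, unsigned int bkt_bits, cfs_hash_bd_t *bd)
{
	bd->bd_bucket = bkts[index & ((1U << (bits - bkt_bits)) - 1)];
	bd->bd_offset = index >> (bits - bkt_bits);
}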
/** hash list operations */
struct cfs_hash_hlist_ops *hs_hops;
/** hash buckets-table */
- cfs_hash_bucket_t **hs_buckets;
+ struct cfs_hash_bucket **hs_buckets;
/** total number of items on this hash-table */
atomic_t hs_count;
/** hash flags, see cfs_hash_tag for details */
/** refcount on this hash table */
atomic_t hs_refcount;
/** rehash buckets-table */
- cfs_hash_bucket_t **hs_rehash_buckets;
+ struct cfs_hash_bucket **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** serialize debug members */
spinlock_t hs_dep_lock;
static inline int
cfs_hash_bkt_size(cfs_hash_t *hs)
{
- return offsetof(cfs_hash_bucket_t, hsb_head[0]) +
+ return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
hs->hs_extra_bytes;
}
}
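The bucket size is thus the fixed header, one list head per in-bucket hash
list, plus whatever extra bytes the caller asked for at creation time. A
hedged allocation sketch (kzalloc/GFP_KERNEL are stand-ins; the real code
uses the libcfs allocation wrappers and also primes each hash-head):

/* Sketch: allocate and minimally initialize the bucket for slot 'i'. */
static struct cfs_hash_bucket *
bucket_alloc_sketch(cfs_hash_t *hs, unsigned int i)
{
	struct cfs_hash_bucket *bkt;

	bkt = kzalloc(cfs_hash_bkt_size(hs), GFP_KERNEL);
	if (bkt == NULL)
		return NULL;
	bkt->hsb_index  = i;
	bkt->hsb_depmax = -1;	/* no depth recorded yet */
	return bkt;
}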
static void
-cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
+cfs_hash_bd_from_key(cfs_hash_t *hs, struct cfs_hash_bucket **bkts,
unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
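	/* (1U << bits) - 1 masks the hash down to 'bits' significant bits,
	 * so the table size is always a power of two; bits is either
	 * hs_cur_bits or hs_rehash_bits depending on which table is used. */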
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
{
- cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
- cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
+ struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
+ struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
int rc;
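	/* When both descriptors resolve to the same bucket there is nothing
	 * to move; otherwise the node is unhooked from the old hash-head and
	 * rehooked onto the new one, with hsb_count and hsb_version updated
	 * on both buckets (continuation elided in this excerpt). */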
if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
unsigned n, int excl)
{
- cfs_hash_bucket_t *prev = NULL;
+ struct cfs_hash_bucket *prev = NULL;
int i;
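	/* Buckets are locked in ascending hsb_index order and 'prev' skips
	 * consecutive descriptors that alias the same bucket, so each bucket
	 * is locked at most once and the ordering stays deadlock-free. */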
/**
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
unsigned n, int excl)
{
- cfs_hash_bucket_t *prev = NULL;
+ struct cfs_hash_bucket *prev = NULL;
int i;
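	/* Mirror of the lock path: 'prev' guarantees a bucket shared by
	 * several descriptors is unlocked exactly once. */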
cfs_hash_for_each_bd(bds, n, i) {
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
static void
-cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
+cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
int bkt_size, int prev_size, int size)
{
int i;
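	/* Slots below prev_size are shared with the previous table and must
	 * survive; only buckets in [prev_size, size) are freed here, followed
	 * by the pointer array itself (loop elided in this excerpt). */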
* needed, the newly allocated buckets if allocation was needed and
* successful, and NULL on error.
*/
-static cfs_hash_bucket_t **
-cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
+static struct cfs_hash_bucket **
+cfs_hash_buckets_realloc(cfs_hash_t *hs, struct cfs_hash_bucket **old_bkts,
unsigned int old_size, unsigned int new_size)
{
- cfs_hash_bucket_t **new_bkts;
+ struct cfs_hash_bucket **new_bkts;
int i;
LASSERT(old_size == 0 || old_bkts != NULL);
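	/* The new table starts as a copy of the old pointer array, so buckets
	 * below old_size are shared rather than reallocated; only slots in
	 * [old_size, new_size) get fresh buckets.  A mid-loop allocation
	 * failure tears the partial table down again via
	 * cfs_hash_buckets_free(). */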
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
- cfs_hash_bucket_t **bkts;
+ struct cfs_hash_bucket **bkts;
cfs_hash_bd_t bd;
unsigned int old_size;
unsigned int new_size;
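	/* Roughly: the worker allocates a new_size table, then migrates
	 * entries bucket descriptor by bucket descriptor under the bd locks
	 * before publishing it as the current table (body elided here). */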
}
EXPORT_SYMBOL(cfs_hash_debug_header);
-static cfs_hash_bucket_t **
+static struct cfs_hash_bucket **
cfs_hash_full_bkts(cfs_hash_t *hs)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
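During a rehash both tables coexist, and a caller that needs a view of every
entry must use the rehash table once it is installed. A minimal sketch of the
selection this helper implies (not the verbatim body, which this excerpt cuts
off):

/* Illustrative: the "full" table is the rehash table while a rehash is in
 * flight, and the regular table otherwise; per the note above, hs_rwlock
 * must be held when REHASH is set. */
static struct cfs_hash_bucket **
full_bkts_sketch(cfs_hash_t *hs)
{
	return hs->hs_rehash_buckets != NULL ?
	       hs->hs_rehash_buckets : hs->hs_buckets;
}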