From: Lisa Nguyen
Date: Tue, 22 Oct 2013 01:16:26 +0000 (-0700)
Subject: staging: lustre: Remove typedef and update cfs_hash struct
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=6da6eabe1038cb438460a8295a37270a8adebb19;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

staging: lustre: Remove typedef and update cfs_hash struct

Remove the typedef keyword and rename the cfs_hash_t struct to cfs_hash
in libcfs_hash.h. These changes resolve the "Do not add new typedefs"
warning generated by checkpatch.pl and meet the kernel coding style.
Struct variables in other header and source files that depend on
libcfs_hash.h are updated as well.

Signed-off-by: Lisa Nguyen
Signed-off-by: Greg Kroah-Hartman
---
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h index 61e4fcadf22f..9d5ee1a69c0c 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h @@ -210,7 +210,7 @@ enum cfs_hash_tag { * locations; additions must take care to only insert into the new bucket. */ -typedef struct cfs_hash { +struct cfs_hash { /** serialize with rehash, or serialize all operations if * the hash-table has CFS_HASH_NO_BKTLOCK */ union cfs_hash_lock hs_lock; @@ -272,7 +272,7 @@ typedef struct cfs_hash { #endif /** name of htable */ char hs_name[0]; -} cfs_hash_t; +}; typedef struct cfs_hash_lock_ops { /** lock the hash table */ @@ -287,20 +287,20 @@ typedef struct cfs_hash_lock_ops { typedef struct cfs_hash_hlist_ops { /** return hlist_head of hash-head of @bd */ - struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, struct cfs_hash_bd *bd); + struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd); /** return hash-head size */ - int (*hop_hhead_size)(cfs_hash_t *hs); + int (*hop_hhead_size)(struct cfs_hash *hs); /** add @hnode to hash-head of @bd */ - int (*hop_hnode_add)(cfs_hash_t *hs, + int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode); /** remove @hnode from hash-head of @bd */ - int (*hop_hnode_del)(cfs_hash_t *hs, + int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode); } cfs_hash_hlist_ops_t; typedef struct cfs_hash_ops { /** return hashed value from @key */ - unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask); + unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask); /** return key address of @hnode */ void * (*hs_key)(struct hlist_node *hnode); /** copy key from @hnode to @key */ @@ -313,13 +313,13 @@ typedef struct cfs_hash_ops { /** return object address of @hnode, i.e: container_of(...hnode) */ void * (*hs_object)(struct hlist_node *hnode); /** get refcount of item, always called with holding bucket-lock */ - void (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode); + void (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode); /** release refcount of item */ - void (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode); + void (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode); /** release refcount of item, always called with holding bucket-lock */ - void (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode); + void (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode); /** it's called before removing of @hnode */ - void (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode); + void (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode); } cfs_hash_ops_t; /** total number of buckets in @hs */ 
@@ -340,41 +340,41 @@ typedef struct cfs_hash_ops { #define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits) static inline int -cfs_hash_with_no_lock(cfs_hash_t *hs) +cfs_hash_with_no_lock(struct cfs_hash *hs) { /* caller will serialize all operations for this hash-table */ return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0; } static inline int -cfs_hash_with_no_bktlock(cfs_hash_t *hs) +cfs_hash_with_no_bktlock(struct cfs_hash *hs) { /* no bucket lock, one single lock to protect the hash-table */ return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0; } static inline int -cfs_hash_with_rw_bktlock(cfs_hash_t *hs) +cfs_hash_with_rw_bktlock(struct cfs_hash *hs) { /* rwlock to protect hash bucket */ return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0; } static inline int -cfs_hash_with_spin_bktlock(cfs_hash_t *hs) +cfs_hash_with_spin_bktlock(struct cfs_hash *hs) { /* spinlock to protect hash bucket */ return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0; } static inline int -cfs_hash_with_add_tail(cfs_hash_t *hs) +cfs_hash_with_add_tail(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0; } static inline int -cfs_hash_with_no_itemref(cfs_hash_t *hs) +cfs_hash_with_no_itemref(struct cfs_hash *hs) { /* hash-table doesn't keep refcount on item, * item can't be removed from hash unless it's @@ -383,73 +383,73 @@ cfs_hash_with_no_itemref(cfs_hash_t *hs) } static inline int -cfs_hash_with_bigname(cfs_hash_t *hs) +cfs_hash_with_bigname(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_BIGNAME) != 0; } static inline int -cfs_hash_with_counter(cfs_hash_t *hs) +cfs_hash_with_counter(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_COUNTER) != 0; } static inline int -cfs_hash_with_rehash(cfs_hash_t *hs) +cfs_hash_with_rehash(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_REHASH) != 0; } static inline int -cfs_hash_with_rehash_key(cfs_hash_t *hs) +cfs_hash_with_rehash_key(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0; } static inline int -cfs_hash_with_shrink(cfs_hash_t *hs) +cfs_hash_with_shrink(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_SHRINK) != 0; } static inline int -cfs_hash_with_assert_empty(cfs_hash_t *hs) +cfs_hash_with_assert_empty(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0; } static inline int -cfs_hash_with_depth(cfs_hash_t *hs) +cfs_hash_with_depth(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_DEPTH) != 0; } static inline int -cfs_hash_with_nblk_change(cfs_hash_t *hs) +cfs_hash_with_nblk_change(struct cfs_hash *hs) { return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0; } static inline int -cfs_hash_is_exiting(cfs_hash_t *hs) +cfs_hash_is_exiting(struct cfs_hash *hs) { /* cfs_hash_destroy is called */ return hs->hs_exiting; } static inline int -cfs_hash_is_rehashing(cfs_hash_t *hs) +cfs_hash_is_rehashing(struct cfs_hash *hs) { /* rehash is launched */ return hs->hs_rehash_bits != 0; } static inline int -cfs_hash_is_iterating(cfs_hash_t *hs) +cfs_hash_is_iterating(struct cfs_hash *hs) { /* someone is calling cfs_hash_for_each_* */ return hs->hs_iterating || hs->hs_iterators != 0; } static inline int -cfs_hash_bkt_size(cfs_hash_t *hs) +cfs_hash_bkt_size(struct cfs_hash *hs) { return offsetof(struct cfs_hash_bucket, hsb_head[0]) + hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) + @@ -459,19 +459,19 @@ cfs_hash_bkt_size(cfs_hash_t *hs) #define CFS_HOP(hs, op) (hs)->hs_ops->hs_ ## op static inline unsigned -cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask) +cfs_hash_id(struct 
cfs_hash *hs, const void *key, unsigned mask) { return CFS_HOP(hs, hash)(hs, key, mask); } static inline void * -cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode) +cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode) { return CFS_HOP(hs, key)(hnode); } static inline void -cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key) +cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key) { if (CFS_HOP(hs, keycpy) != NULL) CFS_HOP(hs, keycpy)(hnode, key); @@ -481,25 +481,25 @@ cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key) * Returns 1 on a match, */ static inline int -cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) +cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) { return CFS_HOP(hs, keycmp)(key, hnode); } static inline void * -cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode) +cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode) { return CFS_HOP(hs, object)(hnode); } static inline void -cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode) +cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode) { return CFS_HOP(hs, get)(hs, hnode); } static inline void -cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) +cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode) { LASSERT(CFS_HOP(hs, put_locked) != NULL); @@ -507,7 +507,7 @@ cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) } static inline void -cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode) +cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode) { LASSERT(CFS_HOP(hs, put) != NULL); @@ -515,36 +515,36 @@ cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode) } static inline void -cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode) +cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode) { if (CFS_HOP(hs, exit)) CFS_HOP(hs, exit)(hs, hnode); } -static inline void cfs_hash_lock(cfs_hash_t *hs, int excl) +static inline void cfs_hash_lock(struct cfs_hash *hs, int excl) { hs->hs_lops->hs_lock(&hs->hs_lock, excl); } -static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl) +static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl) { hs->hs_lops->hs_unlock(&hs->hs_lock, excl); } -static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs, +static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs, atomic_t *condition) { LASSERT(cfs_hash_with_no_bktlock(hs)); return atomic_dec_and_lock(condition, &hs->hs_lock.spin); } -static inline void cfs_hash_bd_lock(cfs_hash_t *hs, +static inline void cfs_hash_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd, int excl) { hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl); } -static inline void cfs_hash_bd_unlock(cfs_hash_t *hs, +static inline void cfs_hash_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bd, int excl) { hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl); @@ -554,21 +554,21 @@ static inline void cfs_hash_bd_unlock(cfs_hash_t *hs, * operations on cfs_hash bucket (bd: bucket descriptor), * they are normally for hash-table without rehash */ -void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bd); +void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd); -static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key, +static inline void cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd, int excl) { cfs_hash_bd_get(hs, key, bd); cfs_hash_bd_lock(hs, bd, excl); } -static inline 
unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, struct cfs_hash_bd *bd) +static inline unsigned cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd) { return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits); } -static inline void cfs_hash_bd_index_set(cfs_hash_t *hs, +static inline void cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index, struct cfs_hash_bd *bd) { bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits]; @@ -576,7 +576,7 @@ static inline void cfs_hash_bd_index_set(cfs_hash_t *hs, } static inline void * -cfs_hash_bd_extra_get(cfs_hash_t *hs, struct cfs_hash_bd *bd) +cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd) { return (void *)bd->bd_bucket + cfs_hash_bkt_size(hs) - hs->hs_extra_bytes; @@ -614,14 +614,14 @@ cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) return 0; } -void cfs_hash_bd_add_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode); -void cfs_hash_bd_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode); -void cfs_hash_bd_move_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd_old, +void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, struct cfs_hash_bd *bd_new, struct hlist_node *hnode); -static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static inline int cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd, atomic_t *condition) { LASSERT(cfs_hash_with_spin_bktlock(hs)); @@ -629,21 +629,21 @@ static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, struct cfs_hash_bd *b &bd->bd_bucket->hsb_lock.spin); } -static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs, +static inline struct hlist_head *cfs_hash_bd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) { return hs->hs_hops->hop_hhead(hs, bd); } -struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key); -struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key); -struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key, struct hlist_node *hnode, int insist_add); -struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key, struct hlist_node *hnode); @@ -651,87 +651,87 @@ struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs, * operations on cfs_hash bucket (bd: bucket descriptor), * they are safe for hash-table with rehash */ -void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bds); -void cfs_hash_dual_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl); -void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl); +void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds); +void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl); +void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl); -static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key, +static inline void 
cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds, int excl) { cfs_hash_dual_bd_get(hs, key, bds); cfs_hash_dual_bd_lock(hs, bds, excl); } -struct hlist_node *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, const void *key); -struct hlist_node *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, const void *key, struct hlist_node *hnode, int insist_add); -struct hlist_node *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, +struct hlist_node *cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, const void *key, struct hlist_node *hnode); /* Hash init/cleanup functions */ -cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, +struct cfs_hash *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, unsigned bkt_bits, unsigned extra_bytes, unsigned min_theta, unsigned max_theta, cfs_hash_ops_t *ops, unsigned flags); -cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs); -void cfs_hash_putref(cfs_hash_t *hs); +struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs); +void cfs_hash_putref(struct cfs_hash *hs); /* Hash addition functions */ -void cfs_hash_add(cfs_hash_t *hs, const void *key, +void cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode); -int cfs_hash_add_unique(cfs_hash_t *hs, const void *key, +int cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode); -void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key, +void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode); /* Hash deletion functions */ -void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode); -void *cfs_hash_del_key(cfs_hash_t *hs, const void *key); +void *cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode); +void *cfs_hash_del_key(struct cfs_hash *hs, const void *key); /* Hash lookup/for_each functions */ #define CFS_HASH_LOOP_HOG 1024 -typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, struct cfs_hash_bd *bd, +typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *node, void *data); -void *cfs_hash_lookup(cfs_hash_t *hs, const void *key); -void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data); -void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data); -int cfs_hash_for_each_nolock(cfs_hash_t *hs, +void *cfs_hash_lookup(struct cfs_hash *hs, const void *key); +void cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); +void cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); +int cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); -int cfs_hash_for_each_empty(cfs_hash_t *hs, +int cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); -void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key, +void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, cfs_hash_for_each_cb_t, void *data); typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data); -void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data); +void cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data); -void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex, +void 
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, cfs_hash_for_each_cb_t, void *data); -int cfs_hash_is_empty(cfs_hash_t *hs); -__u64 cfs_hash_size_get(cfs_hash_t *hs); +int cfs_hash_is_empty(struct cfs_hash *hs); +__u64 cfs_hash_size_get(struct cfs_hash *hs); /* * Rehash - Theta is calculated to be the average chained * hash depth assuming a perfectly uniform hash function. */ -void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs); -void cfs_hash_rehash_cancel(cfs_hash_t *hs); -int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash); -void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key, +void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs); +void cfs_hash_rehash_cancel(struct cfs_hash *hs); +int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash); +void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, void *new_key, struct hlist_node *hnode); #if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 /* Validate hnode references the correct key */ static inline void -cfs_hash_key_validate(cfs_hash_t *hs, const void *key, +cfs_hash_key_validate(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) { LASSERT(cfs_hash_keycmp(hs, key, hnode)); @@ -739,7 +739,7 @@ cfs_hash_key_validate(cfs_hash_t *hs, const void *key, /* Validate hnode is in the correct bucket */ static inline void -cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { struct cfs_hash_bd bds[2]; @@ -752,11 +752,11 @@ cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd, #else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */ static inline void -cfs_hash_key_validate(cfs_hash_t *hs, const void *key, +cfs_hash_key_validate(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) {} static inline void -cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) {} #endif /* CFS_HASH_DEBUG_LEVEL */ @@ -778,13 +778,13 @@ static inline int __cfs_hash_theta_frac(int theta) (__cfs_hash_theta_int(theta) * 1000); } -static inline int __cfs_hash_theta(cfs_hash_t *hs) +static inline int __cfs_hash_theta(struct cfs_hash *hs) { return (atomic_read(&hs->hs_count) << CFS_HASH_THETA_BITS) >> hs->hs_cur_bits; } -static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max) +static inline void __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max) { LASSERT(min < max); hs->hs_min_theta = (__u16)min; @@ -794,7 +794,7 @@ static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max) /* Generic debug formatting routines mainly for proc handler */ struct seq_file; int cfs_hash_debug_header(struct seq_file *m); -int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m); +int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m); /* * Generic djb2 hash algorithm for character arrays. 
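
The hunks above cover the header itself; the change is the standard kernel-style
typedef removal and is purely mechanical. A minimal, self-contained sketch of the
pattern follows, using toy names (foo_table, ft_size) rather than anything from
this patch:

	/*
	 * Before (the form checkpatch.pl warns about):
	 *
	 *	typedef struct foo_table {
	 *		int ft_size;
	 *	} foo_table_t;
	 *
	 *	static int foo_table_size(foo_table_t *ft);
	 */

	/*
	 * After: keep the struct tag, drop the typedef, and spell out
	 * "struct foo_table" at every declaration and call site.
	 */
	struct foo_table {
		int	ft_size;
	};

	static int foo_table_size(struct foo_table *ft)
	{
		return ft->ft_size;
	}

The files below receive the same substitution for cfs_hash_t; there is no
behaviour change.
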
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h index 478cd35ea7a1..d5b8225ef1a7 100644 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ b/drivers/staging/lustre/lustre/include/lu_object.h @@ -622,7 +622,7 @@ struct lu_site { /** * objects hash table */ - cfs_hash_t *ls_obj_hash; + struct cfs_hash *ls_obj_hash; /** * index of bucket on hash table while purging */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index 122441f45a68..bc2b82ffae92 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -375,7 +375,7 @@ struct ldlm_namespace { ldlm_side_t ns_client; /** Resource hash table for namespace. */ - cfs_hash_t *ns_rs_hash; + struct cfs_hash *ns_rs_hash; /** serialize */ spinlock_t ns_lock; diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h index d61c020a4643..2feb38b51af2 100644 --- a/drivers/staging/lustre/lustre/include/lustre_export.h +++ b/drivers/staging/lustre/lustre/include/lustre_export.h @@ -197,12 +197,12 @@ struct obd_export { /** Connection count value from last succesful reconnect rpc */ __u32 exp_conn_cnt; /** Hash list of all ldlm locks granted on this export */ - cfs_hash_t *exp_lock_hash; + struct cfs_hash *exp_lock_hash; /** * Hash list for Posix lock deadlock detection, added with * ldlm_lock::l_exp_flock_hash. */ - cfs_hash_t *exp_flock_hash; + struct cfs_hash *exp_flock_hash; struct list_head exp_outstanding_replies; struct list_head exp_uncommitted_replies; spinlock_t exp_uncommitted_replies_lock; diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index cf2b90d1d08b..72edf01b58a2 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -1427,7 +1427,7 @@ struct nrs_fifo_req { struct nrs_crrn_net { struct ptlrpc_nrs_resource cn_res; cfs_binheap_t *cn_binheap; - cfs_hash_t *cn_cli_hash; + struct cfs_hash *cn_cli_hash; /** * Used when a new scheduling round commences, in order to synchronize * all clients with the new round number. 
@@ -1568,7 +1568,7 @@ struct nrs_orr_key { struct nrs_orr_data { struct ptlrpc_nrs_resource od_res; cfs_binheap_t *od_binheap; - cfs_hash_t *od_obj_hash; + struct cfs_hash *od_obj_hash; struct kmem_cache *od_cache; /** * Used when a new scheduling round commences, in order to synchronize diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index 4fd4af9f29c9..d0aea15b7c39 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -429,7 +429,7 @@ struct client_obd { /* ptlrpc work for writeback in ptlrpcd context */ void *cl_writeback_work; /* hash tables for osc_quota_info */ - cfs_hash_t *cl_quota_hash[MAXQUOTAS]; + struct cfs_hash *cl_quota_hash[MAXQUOTAS]; }; #define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid) @@ -556,7 +556,7 @@ struct lov_obd { __u32 lov_tgt_size; /* size of tgts array */ int lov_connects; int lov_pool_count; - cfs_hash_t *lov_pools_hash_body; /* used for key access */ + struct cfs_hash *lov_pools_hash_body; /* used for key access */ struct list_head lov_pool_list; /* used for sequential access */ struct proc_dir_entry *lov_pool_proc_entry; enum lustre_sec_part lov_sp_me; @@ -855,11 +855,11 @@ struct obd_device { * protection of other bits using _bh lock */ unsigned long obd_recovery_expired:1; /* uuid-export hash body */ - cfs_hash_t *obd_uuid_hash; + struct cfs_hash *obd_uuid_hash; /* nid-export hash body */ - cfs_hash_t *obd_nid_hash; + struct cfs_hash *obd_nid_hash; /* nid stats body */ - cfs_hash_t *obd_nid_stats_hash; + struct cfs_hash *obd_nid_stats_hash; struct list_head obd_nid_stats; atomic_t obd_refcount; wait_queue_head_t obd_refcount_waitq; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index c68ed2766333..39fcdacc51ed 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -745,7 +745,7 @@ void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, * Export handle<->flock hash operations. 
*/ static unsigned -ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask) +ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask) { return cfs_hash_u64_hash(*(__u64 *)key, mask); } @@ -772,7 +772,7 @@ ldlm_export_flock_object(struct hlist_node *hnode) } static void -ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode) +ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_lock *lock; struct ldlm_flock *flock; @@ -787,7 +787,7 @@ ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode) } static void -ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode) +ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_lock *lock; struct ldlm_flock *flock; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index 98f809376e78..3900a69742fc 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -1891,7 +1891,7 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure) return LDLM_ITER_CONTINUE; } -static int ldlm_reprocess_res(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) { struct ldlm_resource *res = cfs_hash_object(hs, hnode); @@ -2040,7 +2040,7 @@ struct export_cl_data { * Iterator function for ldlm_cancel_locks_for_export. * Cancels passed locks. */ -int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, struct cfs_hash_bd *bd, +int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *data) { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index a100a0b96381..fde9bcd1d48d 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -937,7 +937,7 @@ EXPORT_SYMBOL(ldlm_put_ref); * Export handle<->lock hash operations. 
*/ static unsigned -ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask) +ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned mask) { return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask); } @@ -973,7 +973,7 @@ ldlm_export_lock_object(struct hlist_node *hnode) } static void -ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode) +ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_lock *lock; @@ -982,7 +982,7 @@ ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode) } static void -ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode) +ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_lock *lock; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index 2512b2984f24..c3b6ad55d39b 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -1925,7 +1925,7 @@ struct ldlm_cli_cancel_arg { void *lc_opaque; }; -static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) { struct ldlm_resource *res = cfs_hash_object(hs, hnode); @@ -2023,7 +2023,7 @@ static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure) return helper->iter(lock, helper->closure); } -static int ldlm_res_iter_helper(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c index f50af90118fb..77e022bf8bcc 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c @@ -389,7 +389,7 @@ int ldlm_namespace_proc_register(struct ldlm_namespace *ns) #endif /* LPROCFS */ -static unsigned ldlm_res_hop_hash(cfs_hash_t *hs, +static unsigned ldlm_res_hop_hash(struct cfs_hash *hs, const void *key, unsigned mask) { const struct ldlm_res_id *id = key; @@ -401,7 +401,7 @@ static unsigned ldlm_res_hop_hash(cfs_hash_t *hs, return val & mask; } -static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs, +static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs, const void *key, unsigned mask) { const struct ldlm_res_id *id = key; @@ -453,7 +453,7 @@ static void *ldlm_res_hop_object(struct hlist_node *hnode) return hlist_entry(hnode, struct ldlm_resource, lr_hash); } -static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode) +static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; @@ -461,7 +461,7 @@ static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode) ldlm_resource_getref(res); } -static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) +static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; @@ -470,7 +470,7 @@ static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) ldlm_resource_putref_locked(res); } -static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode) +static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; @@ -743,7 +743,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, } while 
(1); } -static int ldlm_resource_clean(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) { struct ldlm_resource *res = cfs_hash_object(hs, hnode); @@ -756,7 +756,7 @@ static int ldlm_resource_clean(cfs_hash_t *hs, struct cfs_hash_bd *bd, return 0; } -static int ldlm_resource_complain(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) { struct ldlm_resource *res = cfs_hash_object(hs, hnode); @@ -1352,7 +1352,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level) } EXPORT_SYMBOL(ldlm_dump_all_namespaces); -static int ldlm_res_hash_dump(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) { struct ldlm_resource *res = cfs_hash_object(hs, hnode); diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c index 46186e19ea6c..e3e0578b27f9 100644 --- a/drivers/staging/lustre/lustre/libcfs/hash.c +++ b/drivers/staging/lustre/lustre/libcfs/hash.c @@ -209,7 +209,7 @@ static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = }; static void -cfs_hash_lock_setup(cfs_hash_t *hs) +cfs_hash_lock_setup(struct cfs_hash *hs) { if (cfs_hash_with_no_lock(hs)) { hs->hs_lops = &cfs_hash_nl_lops; @@ -246,13 +246,13 @@ typedef struct { } cfs_hash_head_t; static int -cfs_hash_hh_hhead_size(cfs_hash_t *hs) +cfs_hash_hh_hhead_size(struct cfs_hash *hs) { return sizeof(cfs_hash_head_t); } static struct hlist_head * -cfs_hash_hh_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) +cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) { cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0]; @@ -260,7 +260,7 @@ cfs_hash_hh_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) } static int -cfs_hash_hh_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); @@ -268,7 +268,7 @@ cfs_hash_hh_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, } static int -cfs_hash_hh_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { hlist_del_init(hnode); @@ -285,13 +285,13 @@ typedef struct { } cfs_hash_head_dep_t; static int -cfs_hash_hd_hhead_size(cfs_hash_t *hs) +cfs_hash_hd_hhead_size(struct cfs_hash *hs) { return sizeof(cfs_hash_head_dep_t); } static struct hlist_head * -cfs_hash_hd_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) +cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) { cfs_hash_head_dep_t *head; @@ -300,7 +300,7 @@ cfs_hash_hd_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) } static int -cfs_hash_hd_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd), @@ -310,7 +310,7 @@ cfs_hash_hd_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, } static int -cfs_hash_hd_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd), @@ -329,13 +329,13 @@ typedef struct { } cfs_hash_dhead_t; static int 
-cfs_hash_dh_hhead_size(cfs_hash_t *hs) +cfs_hash_dh_hhead_size(struct cfs_hash *hs) { return sizeof(cfs_hash_dhead_t); } static struct hlist_head * -cfs_hash_dh_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) +cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) { cfs_hash_dhead_t *head; @@ -344,7 +344,7 @@ cfs_hash_dh_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) } static int -cfs_hash_dh_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd), @@ -359,7 +359,7 @@ cfs_hash_dh_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, } static int -cfs_hash_dh_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnd) { cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd), @@ -384,13 +384,13 @@ typedef struct { } cfs_hash_dhead_dep_t; static int -cfs_hash_dd_hhead_size(cfs_hash_t *hs) +cfs_hash_dd_hhead_size(struct cfs_hash *hs) { return sizeof(cfs_hash_dhead_dep_t); } static struct hlist_head * -cfs_hash_dd_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) +cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) { cfs_hash_dhead_dep_t *head; @@ -399,7 +399,7 @@ cfs_hash_dd_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd) } static int -cfs_hash_dd_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd), @@ -414,7 +414,7 @@ cfs_hash_dd_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd, } static int -cfs_hash_dd_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnd) { cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd), @@ -457,7 +457,7 @@ static cfs_hash_hlist_ops_t cfs_hash_dd_hops = { }; static void -cfs_hash_hlist_setup(cfs_hash_t *hs) +cfs_hash_hlist_setup(struct cfs_hash *hs) { if (cfs_hash_with_add_tail(hs)) { hs->hs_hops = cfs_hash_with_depth(hs) ? 
@@ -469,7 +469,7 @@ cfs_hash_hlist_setup(cfs_hash_t *hs) } static void -cfs_hash_bd_from_key(cfs_hash_t *hs, struct cfs_hash_bucket **bkts, +cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts, unsigned int bits, const void *key, struct cfs_hash_bd *bd) { unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1); @@ -481,7 +481,7 @@ cfs_hash_bd_from_key(cfs_hash_t *hs, struct cfs_hash_bucket **bkts, } void -cfs_hash_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bd) +cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) { /* NB: caller should hold hs->hs_rwlock if REHASH is set */ if (likely(hs->hs_rehash_buckets == NULL)) { @@ -496,7 +496,7 @@ cfs_hash_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bd) EXPORT_SYMBOL(cfs_hash_bd_get); static inline void -cfs_hash_bd_dep_record(cfs_hash_t *hs, struct cfs_hash_bd *bd, int dep_cur) +cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur) { if (likely(dep_cur <= bd->bd_bucket->hsb_depmax)) return; @@ -519,7 +519,7 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, struct cfs_hash_bd *bd, int dep_cur) } void -cfs_hash_bd_add_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { int rc; @@ -539,7 +539,7 @@ cfs_hash_bd_add_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, EXPORT_SYMBOL(cfs_hash_bd_add_locked); void -cfs_hash_bd_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode) { hs->hs_hops->hop_hnode_del(hs, bd, hnode); @@ -560,7 +560,7 @@ cfs_hash_bd_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, EXPORT_SYMBOL(cfs_hash_bd_del_locked); void -cfs_hash_bd_move_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd_old, +cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, struct cfs_hash_bd *bd_new, struct hlist_node *hnode) { struct cfs_hash_bucket *obkt = bd_old->bd_bucket; @@ -617,7 +617,7 @@ typedef enum cfs_hash_lookup_intent { } cfs_hash_lookup_intent_t; static struct hlist_node * -cfs_hash_bd_lookup_intent(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key, struct hlist_node *hnode, cfs_hash_lookup_intent_t intent) @@ -658,7 +658,7 @@ cfs_hash_bd_lookup_intent(cfs_hash_t *hs, struct cfs_hash_bd *bd, } struct hlist_node * -cfs_hash_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, const void *key) +cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key) { return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, CFS_HS_LOOKUP_IT_FIND); @@ -666,7 +666,7 @@ cfs_hash_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, const void *ke EXPORT_SYMBOL(cfs_hash_bd_lookup_locked); struct hlist_node * -cfs_hash_bd_peek_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, const void *key) +cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key) { return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, CFS_HS_LOOKUP_IT_PEEK); @@ -674,7 +674,7 @@ cfs_hash_bd_peek_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, const void *key) EXPORT_SYMBOL(cfs_hash_bd_peek_locked); struct hlist_node * -cfs_hash_bd_findadd_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key, struct hlist_node *hnode, int noref) { @@ -685,7 +685,7 @@ cfs_hash_bd_findadd_locked(cfs_hash_t *hs, 
struct cfs_hash_bd *bd, EXPORT_SYMBOL(cfs_hash_bd_findadd_locked); struct hlist_node * -cfs_hash_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key, struct hlist_node *hnode) { /* hnode can be NULL, we find the first item with @key */ @@ -695,7 +695,7 @@ cfs_hash_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, EXPORT_SYMBOL(cfs_hash_bd_finddel_locked); static void -cfs_hash_multi_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, unsigned n, int excl) { struct cfs_hash_bucket *prev = NULL; @@ -718,7 +718,7 @@ cfs_hash_multi_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, } static void -cfs_hash_multi_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, unsigned n, int excl) { struct cfs_hash_bucket *prev = NULL; @@ -733,7 +733,7 @@ cfs_hash_multi_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, } static struct hlist_node * -cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, unsigned n, const void *key) { struct hlist_node *ehnode; @@ -749,7 +749,7 @@ cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, } static struct hlist_node * -cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs, +cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, unsigned n, const void *key, struct hlist_node *hnode, int noref) { @@ -780,7 +780,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs, } static struct hlist_node * -cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, unsigned n, const void *key, struct hlist_node *hnode) { @@ -824,7 +824,7 @@ cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) } void -cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bds) +cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds) { /* NB: caller should hold hs_lock.rw if REHASH is set */ cfs_hash_bd_from_key(hs, hs->hs_buckets, @@ -844,21 +844,21 @@ cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bds) EXPORT_SYMBOL(cfs_hash_dual_bd_get); void -cfs_hash_dual_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl) +cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) { cfs_hash_multi_bd_lock(hs, bds, 2, excl); } EXPORT_SYMBOL(cfs_hash_dual_bd_lock); void -cfs_hash_dual_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl) +cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) { cfs_hash_multi_bd_unlock(hs, bds, 2, excl); } EXPORT_SYMBOL(cfs_hash_dual_bd_unlock); struct hlist_node * -cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, const void *key) { return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key); @@ -866,7 +866,7 @@ cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked); struct hlist_node * -cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, const void *key, struct hlist_node *hnode, int noref) { @@ -876,7 +876,7 @@ 
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked); struct hlist_node * -cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds, +cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, const void *key, struct hlist_node *hnode) { return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); @@ -903,7 +903,7 @@ cfs_hash_buckets_free(struct cfs_hash_bucket **buckets, * successful, and NULL on error. */ static struct cfs_hash_bucket ** -cfs_hash_buckets_realloc(cfs_hash_t *hs, struct cfs_hash_bucket **old_bkts, +cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, unsigned int old_size, unsigned int new_size) { struct cfs_hash_bucket **new_bkts; @@ -969,7 +969,7 @@ static int cfs_hash_rehash_worker(cfs_workitem_t *wi); #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 static int cfs_hash_dep_print(cfs_workitem_t *wi) { - cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi); + struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi); int dep; int bkt; int off; @@ -990,13 +990,13 @@ static int cfs_hash_dep_print(cfs_workitem_t *wi) return 0; } -static void cfs_hash_depth_wi_init(cfs_hash_t *hs) +static void cfs_hash_depth_wi_init(struct cfs_hash *hs) { spin_lock_init(&hs->hs_dep_lock); cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print); } -static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) +static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) { if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi)) return; @@ -1012,18 +1012,18 @@ static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */ -static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {} -static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {} +static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {} +static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {} #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ -cfs_hash_t * +struct cfs_hash * cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, unsigned bkt_bits, unsigned extra_bytes, unsigned min_theta, unsigned max_theta, cfs_hash_ops_t *ops, unsigned flags) { - cfs_hash_t *hs; + struct cfs_hash *hs; int len; CLASSERT(CFS_HASH_THETA_BITS < 15); @@ -1051,7 +1051,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, len = (flags & CFS_HASH_BIGNAME) == 0 ? CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; - LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len])); + LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); if (hs == NULL) return NULL; @@ -1084,7 +1084,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, if (hs->hs_buckets != NULL) return hs; - LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len])); + LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len])); return NULL; } EXPORT_SYMBOL(cfs_hash_create); @@ -1093,7 +1093,7 @@ EXPORT_SYMBOL(cfs_hash_create); * Cleanup libcfs hash @hs. */ static void -cfs_hash_destroy(cfs_hash_t *hs) +cfs_hash_destroy(struct cfs_hash *hs) { struct hlist_node *hnode; struct hlist_node *pos; @@ -1148,10 +1148,10 @@ cfs_hash_destroy(cfs_hash_t *hs) 0, CFS_HASH_NBKT(hs)); i = cfs_hash_with_bigname(hs) ? 
CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN; - LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i])); + LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i])); } -cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs) +struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs) { if (atomic_inc_not_zero(&hs->hs_refcount)) return hs; @@ -1159,7 +1159,7 @@ cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs) } EXPORT_SYMBOL(cfs_hash_getref); -void cfs_hash_putref(cfs_hash_t *hs) +void cfs_hash_putref(struct cfs_hash *hs) { if (atomic_dec_and_test(&hs->hs_refcount)) cfs_hash_destroy(hs); @@ -1167,7 +1167,7 @@ void cfs_hash_putref(cfs_hash_t *hs) EXPORT_SYMBOL(cfs_hash_putref); static inline int -cfs_hash_rehash_bits(cfs_hash_t *hs) +cfs_hash_rehash_bits(struct cfs_hash *hs) { if (cfs_hash_with_no_lock(hs) || !cfs_hash_with_rehash(hs)) @@ -1204,7 +1204,7 @@ cfs_hash_rehash_bits(cfs_hash_t *hs) * - too many elements */ static inline int -cfs_hash_rehash_inline(cfs_hash_t *hs) +cfs_hash_rehash_inline(struct cfs_hash *hs) { return !cfs_hash_with_nblk_change(hs) && atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG; @@ -1215,7 +1215,7 @@ cfs_hash_rehash_inline(cfs_hash_t *hs) * ops->hs_get function will be called when the item is added. */ void -cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) +cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) { struct cfs_hash_bd bd; int bits; @@ -1238,7 +1238,7 @@ cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) EXPORT_SYMBOL(cfs_hash_add); static struct hlist_node * -cfs_hash_find_or_add(cfs_hash_t *hs, const void *key, +cfs_hash_find_or_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode, int noref) { struct hlist_node *ehnode; @@ -1270,7 +1270,7 @@ cfs_hash_find_or_add(cfs_hash_t *hs, const void *key, * Returns 0 on success or -EALREADY on key collisions. */ int -cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) +cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) { return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? -EALREADY : 0; @@ -1284,7 +1284,7 @@ EXPORT_SYMBOL(cfs_hash_add_unique); * Otherwise ops->hs_get is called on the item which was added. */ void * -cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key, +cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) { hnode = cfs_hash_find_or_add(hs, key, hnode, 0); @@ -1301,7 +1301,7 @@ EXPORT_SYMBOL(cfs_hash_findadd_unique); * on the removed object. */ void * -cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) +cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) { void *obj = NULL; int bits = 0; @@ -1341,7 +1341,7 @@ EXPORT_SYMBOL(cfs_hash_del); * will be returned and ops->hs_put is called on the removed object. */ void * -cfs_hash_del_key(cfs_hash_t *hs, const void *key) +cfs_hash_del_key(struct cfs_hash *hs, const void *key) { return cfs_hash_del(hs, key, NULL); } @@ -1356,7 +1356,7 @@ EXPORT_SYMBOL(cfs_hash_del_key); * in the hash @hs NULL is returned. 
*/ void * -cfs_hash_lookup(cfs_hash_t *hs, const void *key) +cfs_hash_lookup(struct cfs_hash *hs, const void *key) { void *obj = NULL; struct hlist_node *hnode; @@ -1377,7 +1377,7 @@ cfs_hash_lookup(cfs_hash_t *hs, const void *key) EXPORT_SYMBOL(cfs_hash_lookup); static void -cfs_hash_for_each_enter(cfs_hash_t *hs) +cfs_hash_for_each_enter(struct cfs_hash *hs) { LASSERT(!cfs_hash_is_exiting(hs)); @@ -1403,7 +1403,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs) } static void -cfs_hash_for_each_exit(cfs_hash_t *hs) +cfs_hash_for_each_exit(struct cfs_hash *hs) { int remained; int bits; @@ -1434,7 +1434,7 @@ cfs_hash_for_each_exit(cfs_hash_t *hs) * cfs_hash_bd_del_locked */ static __u64 -cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, +cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data, int remove_safe) { struct hlist_node *hnode; @@ -1492,7 +1492,7 @@ typedef struct { } cfs_hash_cond_arg_t; static int -cfs_hash_cond_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *data) { cfs_hash_cond_arg_t *cond = data; @@ -1508,7 +1508,7 @@ cfs_hash_cond_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, * any object be reference. */ void -cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data) +cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data) { cfs_hash_cond_arg_t arg = { .func = func, @@ -1520,7 +1520,7 @@ cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data) EXPORT_SYMBOL(cfs_hash_cond_del); void -cfs_hash_for_each(cfs_hash_t *hs, +cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data) { cfs_hash_for_each_tight(hs, func, data, 0); @@ -1528,7 +1528,7 @@ cfs_hash_for_each(cfs_hash_t *hs, EXPORT_SYMBOL(cfs_hash_for_each); void -cfs_hash_for_each_safe(cfs_hash_t *hs, +cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data) { cfs_hash_for_each_tight(hs, func, data, 1); @@ -1536,7 +1536,7 @@ cfs_hash_for_each_safe(cfs_hash_t *hs, EXPORT_SYMBOL(cfs_hash_for_each_safe); static int -cfs_hash_peek(cfs_hash_t *hs, struct cfs_hash_bd *bd, +cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *data) { *(int *)data = 0; @@ -1544,7 +1544,7 @@ cfs_hash_peek(cfs_hash_t *hs, struct cfs_hash_bd *bd, } int -cfs_hash_is_empty(cfs_hash_t *hs) +cfs_hash_is_empty(struct cfs_hash *hs) { int empty = 1; @@ -1554,7 +1554,7 @@ cfs_hash_is_empty(cfs_hash_t *hs) EXPORT_SYMBOL(cfs_hash_is_empty); __u64 -cfs_hash_size_get(cfs_hash_t *hs) +cfs_hash_size_get(struct cfs_hash *hs) { return cfs_hash_with_counter(hs) ? atomic_read(&hs->hs_count) : @@ -1578,7 +1578,7 @@ EXPORT_SYMBOL(cfs_hash_size_get); * two cases, so iteration has to be stopped on change. */ static int -cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data) +cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data) { struct hlist_node *hnode; struct hlist_node *tmp; @@ -1639,7 +1639,7 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data) } int -cfs_hash_for_each_nolock(cfs_hash_t *hs, +cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data) { if (cfs_hash_with_no_lock(hs) || @@ -1672,7 +1672,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_nolock); * the required locking is in place to prevent concurrent insertions. 
*/ int -cfs_hash_for_each_empty(cfs_hash_t *hs, +cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data) { unsigned i = 0; @@ -1696,7 +1696,7 @@ cfs_hash_for_each_empty(cfs_hash_t *hs, EXPORT_SYMBOL(cfs_hash_for_each_empty); void -cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex, +cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, cfs_hash_for_each_cb_t func, void *data) { struct hlist_head *hhead; @@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each); * is held so the callback must never sleep. */ void -cfs_hash_for_each_key(cfs_hash_t *hs, const void *key, +cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, cfs_hash_for_each_cb_t func, void *data) { struct hlist_node *hnode; @@ -1772,7 +1772,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_key); * theta thresholds for @hs are tunable via cfs_hash_set_theta(). */ void -cfs_hash_rehash_cancel_locked(cfs_hash_t *hs) +cfs_hash_rehash_cancel_locked(struct cfs_hash *hs) { int i; @@ -1801,7 +1801,7 @@ cfs_hash_rehash_cancel_locked(cfs_hash_t *hs) EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked); void -cfs_hash_rehash_cancel(cfs_hash_t *hs) +cfs_hash_rehash_cancel(struct cfs_hash *hs) { cfs_hash_lock(hs, 1); cfs_hash_rehash_cancel_locked(hs); @@ -1810,7 +1810,7 @@ cfs_hash_rehash_cancel(cfs_hash_t *hs) EXPORT_SYMBOL(cfs_hash_rehash_cancel); int -cfs_hash_rehash(cfs_hash_t *hs, int do_rehash) +cfs_hash_rehash(struct cfs_hash *hs, int do_rehash) { int rc; @@ -1840,7 +1840,7 @@ cfs_hash_rehash(cfs_hash_t *hs, int do_rehash) EXPORT_SYMBOL(cfs_hash_rehash); static int -cfs_hash_rehash_bd(cfs_hash_t *hs, struct cfs_hash_bd *old) +cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) { struct cfs_hash_bd new; struct hlist_head *hhead; @@ -1873,7 +1873,7 @@ cfs_hash_rehash_bd(cfs_hash_t *hs, struct cfs_hash_bd *old) static int cfs_hash_rehash_worker(cfs_workitem_t *wi) { - cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_rehash_wi); + struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi); struct cfs_hash_bucket **bkts; struct cfs_hash_bd bd; unsigned int old_size; @@ -1980,7 +1980,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi) * the registered cfs_hash_get() and cfs_hash_put() functions will * not be called. 
*/ -void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key, +void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, void *new_key, struct hlist_node *hnode) { struct cfs_hash_bd bds[3]; @@ -2029,7 +2029,7 @@ int cfs_hash_debug_header(struct seq_file *m) EXPORT_SYMBOL(cfs_hash_debug_header); static struct cfs_hash_bucket ** -cfs_hash_full_bkts(cfs_hash_t *hs) +cfs_hash_full_bkts(struct cfs_hash *hs) { /* NB: caller should hold hs->hs_rwlock if REHASH is set */ if (hs->hs_rehash_buckets == NULL) @@ -2041,7 +2041,7 @@ cfs_hash_full_bkts(cfs_hash_t *hs) } static unsigned int -cfs_hash_full_nbkt(cfs_hash_t *hs) +cfs_hash_full_nbkt(struct cfs_hash *hs) { /* NB: caller should hold hs->hs_rwlock if REHASH is set */ if (hs->hs_rehash_buckets == NULL) @@ -2052,7 +2052,7 @@ cfs_hash_full_nbkt(cfs_hash_t *hs) CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); } -int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m) +int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m) { int dist[8] = { 0, }; int maxdep = -1; diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c index b4d03d7e134b..c4d1580b7be5 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c @@ -297,7 +297,7 @@ static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id) ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT); } -static int vvp_pgcache_obj_get(cfs_hash_t *hs, struct cfs_hash_bd *bd, +static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *data) { struct vvp_pgcache_id *id = data; diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c index dd3c07d5c4de..a1701dfe4083 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pool.c +++ b/drivers/staging/lustre/lustre/lov/lov_pool.c @@ -86,7 +86,7 @@ void lov_pool_putref_locked(struct pool_desc *pool) * Chapter 6.4. 
 * Addison Wesley, 1973
 */
-static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
 {
	int i;
	__u32 result;
@@ -125,7 +125,7 @@ static void *pool_hashobject(struct hlist_node *hnode)
	return hlist_entry(hnode, struct pool_desc, pool_hash);
 }
-static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct pool_desc *pool;
@@ -133,7 +133,7 @@ static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
	lov_pool_getref(pool);
 }
-static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
					 struct hlist_node *hnode)
 {
	struct pool_desc *pool;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 7b0e9d26b6c1..1a926036724b 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -577,9 +577,9 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
 * The implementation of using hash table to connect cl_env and thread
 */
-static cfs_hash_t *cl_env_hash;
+static struct cfs_hash *cl_env_hash;
-static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+static unsigned cl_env_hops_hash(struct cfs_hash *lh,
				 const void *key, unsigned mask)
 {
 #if BITS_PER_LONG == 64
@@ -604,7 +604,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
	return (key == cle->ce_owner);
 }
-static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
+static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
 {
	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
	LASSERT(cle->ce_magic == &cl_env_init0);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 68fe71c8a2a9..f6fae16fc7f7 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -816,7 +816,7 @@ struct obd_export *class_new_export(struct obd_device *obd,
					struct obd_uuid *cluuid)
 {
	struct obd_export *export;
-	cfs_hash_t *hash = NULL;
+	struct cfs_hash *hash = NULL;
	int rc = 0;
	OBD_ALLOC_PTR(export);
@@ -1384,7 +1384,7 @@ EXPORT_SYMBOL(obd_export_nid2str);
 int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
 {
-	cfs_hash_t *nid_hash;
+	struct cfs_hash *nid_hash;
	struct obd_export *doomed_exp = NULL;
	int exports_evicted = 0;
@@ -1432,7 +1432,7 @@ EXPORT_SYMBOL(obd_export_evict_by_nid);
 int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
 {
-	cfs_hash_t *uuid_hash;
+	struct cfs_hash *uuid_hash;
	struct obd_export *doomed_exp = NULL;
	struct obd_uuid doomed_uuid;
	int exports_evicted = 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index c312b9e763a7..02d76f8dbcb9 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -898,7 +898,7 @@ static void lprocfs_free_client_stats(struct nid_stat *client_stat)
 void lprocfs_free_per_client_stats(struct obd_device *obd)
 {
-	cfs_hash_t *hash = obd->obd_nid_stats_hash;
+	struct cfs_hash *hash = obd->obd_nid_stats_hash;
	struct nid_stat *stat;
	/* we need extra list - because hash_exit called to early */
@@ -1422,7 +1422,7 @@ void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats)
 }
 EXPORT_SYMBOL(lprocfs_init_ldlm_stats);
-int lprocfs_exp_print_uuid(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+int lprocfs_exp_print_uuid(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   struct hlist_node *hnode, void *data)
 {
@@ -1453,7 +1453,7 @@ struct exp_hash_cb_data {
	bool first;
 };
-int lprocfs_exp_print_hash(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+int lprocfs_exp_print_hash(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   struct hlist_node *hnode, void *cb_data)
 {
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 468498c6ad5c..212823ab937b 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -175,7 +175,7 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
-		cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
+		struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		struct cfs_hash_bd bd;
		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
@@ -589,7 +589,7 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
				       const struct lu_object_conf *conf)
 {
	struct lu_object *o;
-	cfs_hash_t *hs;
+	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	struct lu_site_bkt_data *bkt;
@@ -618,7 +618,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
-	cfs_hash_t *hs;
+	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;
@@ -788,7 +788,7 @@ struct lu_site_print_arg {
 };
 static int
-lu_site_obj_print(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		  struct hlist_node *hnode, void *data)
 {
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
@@ -874,7 +874,7 @@ static int lu_htable_order(void)
	return bits;
 }
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
				const void *key, unsigned mask)
 {
	struct lu_fid *fid = (struct lu_fid *)key;
@@ -914,7 +914,7 @@ static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
-static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct lu_object_header *h;
@@ -929,7 +929,7 @@ static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
	}
 }
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	LBUG(); /* we should never called it */
 }
@@ -1788,7 +1788,7 @@ typedef struct lu_site_stats{
	unsigned	lss_busy;
 } lu_site_stats_t;
-static void lu_site_stats_get(cfs_hash_t *hs,
+static void lu_site_stats_get(struct cfs_hash *hs,
			      lu_site_stats_t *stats, int populated)
 {
	struct cfs_hash_bd bd;
@@ -2072,7 +2072,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
	struct lu_site_bkt_data *bkt;
	struct lu_object *shadow;
	wait_queue_t waiter;
-	cfs_hash_t *hs;
+	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 73e2571f463f..362ae541b209 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -1692,7 +1692,7 @@ EXPORT_SYMBOL(class_manual_cleanup);
 */
 static unsigned
-uuid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
	return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
				  sizeof(((struct obd_uuid *)key)->uuid), mask);
@@ -1731,7 +1731,7 @@ uuid_export_object(struct hlist_node *hnode)
 }
 static void
-uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct obd_export *exp;
@@ -1740,7 +1740,7 @@ uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 static void
-uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct obd_export *exp;
@@ -1763,7 +1763,7 @@ static cfs_hash_ops_t uuid_hash_ops = {
 */
 static unsigned
-nid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+nid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
	return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
 }
@@ -1801,7 +1801,7 @@ nid_export_object(struct hlist_node *hnode)
 }
 static void
-nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct obd_export *exp;
@@ -1810,7 +1810,7 @@ nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 static void
-nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct obd_export *exp;
@@ -1855,7 +1855,7 @@ nidstats_object(struct hlist_node *hnode)
 }
 static void
-nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct nid_stat *ns;
@@ -1864,7 +1864,7 @@ nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 static void
-nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct nid_stat *ns;
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 9720c0e865c8..6045a78a2baa 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -139,7 +139,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
 * Hash operations for uid/gid <-> osc_quota_info
 */
 static unsigned
-oqi_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
 {
	return cfs_hash_u32_hash(*((__u32*)key), mask);
 }
@@ -172,17 +172,17 @@ oqi_object(struct hlist_node *hnode)
 }
 static void
-oqi_get(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 }
 static void
-oqi_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 }
 static void
-oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct osc_quota_info *oqi;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 17ca84208873..6756356faac1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -41,7 +41,7 @@
 #include "ptlrpc_internal.h"
-static cfs_hash_t *conn_hash = NULL;
+static struct cfs_hash *conn_hash = NULL;
 static cfs_hash_ops_t conn_hash_ops;
 struct ptlrpc_connection *
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(ptlrpc_connection_fini);
 * Hash operations for net_peer<->connection
 */
 static unsigned
-conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
 {
	return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
 }
@@ -195,7 +195,7 @@ conn_object(struct hlist_node *hnode)
 }
 static void
-conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct ptlrpc_connection *conn;
@@ -204,7 +204,7 @@ conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 static void
-conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct ptlrpc_connection *conn;
@@ -213,7 +213,7 @@ conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 }
 static void
-conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
	struct ptlrpc_connection *conn;