struct rcu_head gc_rcu;
};
/*
- * Once inet_peer is queued for deletion (refcnt == -1), following field
+ * Once inet_peer is queued for deletion (refcnt == 0), following field
* is not available: rid
* We can share memory with rcu_head to help keep inet_peer small.
*/
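/* Sketch (annotation, not in the excerpt): the comment above refers to a
 * union along these lines, so rid can reuse the storage needed for the
 * RCU callback and gc linkage once the entry is dead:
 *
 *	union {
 *		atomic_t		rid;
 *		struct rcu_head		rcu;
 *		struct inet_peer	*gc_next;
 *	};
 */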
/* following fields might be frequently dirtied */
__u32 dtime; /* the time of last use of not referenced entries */
- atomic_t refcnt;
+ refcount_t refcnt;
};
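/* Annotation (not part of the patch): refcount_t treats zero as "object
 * gone", so the conversion shifts every state up by one and the tree
 * itself now accounts for a reference:
 *
 *	atomic_t scheme			refcount_t scheme
 *	-1  queued for deletion		0  queued for deletion
 *	 0  in tree, no users		1  in tree, no users
 *	 1  in tree + one user		2  in tree + one user
 */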
struct inet_peer_base {
n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
- if (!atomic_read(&p->refcnt)) {
+ if (refcount_read(&p->refcnt) == 1) {
list_del(&p->gc_list);
kmem_cache_free(peer_cachep, p);
}
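/* Annotation: the base reference (held by the tree or, after a flush, by
 * the gc list) makes "no outstanding users" read as 1 rather than 0 here;
 * entries still above that stay on the list for a later run of the worker.
 */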
int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
if (cmp == 0) {
/* Before taking a reference, check if this entry was
- * deleted (refcnt=-1)
+ * deleted (refcnt=0)
*/
- if (!atomic_add_unless(&u->refcnt, 1, -1))
+ if (!refcount_inc_not_zero(&u->refcnt))
u = NULL;
return u;
}
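/* Annotation: atomic_add_unless(&refcnt, 1, -1) took a reference unless
 * the entry had already been marked deleted (-1). refcount_inc_not_zero()
 * is the direct equivalent now that the "deleted" sentinel is 0, the one
 * value a refcount_t refuses to increment from.
 */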
if (cmp == -1)
while (stackptr > stack) {
stackptr--;
p = rcu_deref_locked(**stackptr, base);
- if (atomic_read(&p->refcnt) == 0) {
+ if (refcount_read(&p->refcnt) == 1) {
smp_rmb();
delta = (__u32)jiffies - p->dtime;
- if (delta >= ttl &&
- atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
+ if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
p->gc_next = gchead;
gchead = p;
}
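/* Annotation: refcount_dec_if_one() is the new claim-for-deletion step.
 * It succeeds only when the tree's reference is the last one (1 -> 0),
 * just as the old cmpxchg(0, -1) did, and once the count hits 0 any
 * concurrent lockless lookup fails its refcount_inc_not_zero().
 */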
relookup:
p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
- atomic_inc(&p->refcnt);
+ refcount_inc(&p->refcnt);
write_sequnlock_bh(&base->lock);
return p;
}
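/* Annotation: the refcount_inc() above runs under base->lock, where the
 * entry cannot be claimed for deletion concurrently, so its count is
 * known to be at least the tree's reference and a plain increment is
 * sufficient.
 */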
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
p->daddr = *daddr;
- atomic_set(&p->refcnt, 1);
+ refcount_set(&p->refcnt, 2);
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
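/* Annotation: a freshly created entry now starts at 2: one reference
 * owned by the tree it is about to be linked into plus the one handed
 * back to the caller, where atomic_set(.., 1) used to count the caller
 * alone.
 */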
void inet_putpeer(struct inet_peer *p)
{
p->dtime = (__u32)jiffies;
smp_mb__before_atomic();
- atomic_dec(&p->refcnt);
+ refcount_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
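/* Annotation: inet_putpeer() only drops the caller's reference; freeing
 * happens in the gc paths once the count falls back to the tree-only
 * value. smp_mb__before_atomic() keeps the dtime store ordered before the
 * decrement, pairing with the smp_rmb() in the gc scan that reads refcnt
 * and then dtime.
 */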