From e4c4e448cf557921ffbbbd6d6ddac81fdceacb4f Mon Sep 17 00:00:00 2001
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Thu, 30 Jul 2009 03:15:07 +0000
Subject: [PATCH] neigh: Convert garbage collection from softirq to workqueue

Current neigh_periodic_timer() function is fired by timer IRQ, and
scans one hash bucket each round (very little work in fact).

As we are supposed to scan the whole hash table in 15 seconds, this
means neigh_periodic_timer() can be fired very often, depending on
the number of entries currently stored in this table.

Converting this to a workqueue permits scanning the whole table in
one pass, minimizing icache pollution, and firing this work every 15
seconds, independently of hash table size.

This 15 second delay is not a hard deadline, as the work is a
deferrable one.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/neighbour.h |  4 +-
 net/core/neighbour.c    | 89 ++++++++++++++++++++---------------------
 2 files changed, 45 insertions(+), 48 deletions(-)

diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index d8d790e56d3d..18b69b6cecaf 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -24,6 +24,7 @@
 
 #include <linux/err.h>
 #include <linux/sysctl.h>
+#include <linux/workqueue.h>
 #include <net/rtnetlink.h>
 
 /*
@@ -167,7 +168,7 @@ struct neigh_table
 	int			gc_thresh2;
 	int			gc_thresh3;
 	unsigned long		last_flush;
-	struct timer_list	gc_timer;
+	struct delayed_work	gc_work;
 	struct timer_list	proxy_timer;
 	struct sk_buff_head	proxy_queue;
 	atomic_t		entries;
@@ -178,7 +179,6 @@ struct neigh_table
 	struct neighbour	**hash_buckets;
 	unsigned int		hash_mask;
 	__u32			hash_rnd;
-	unsigned int		hash_chain_gc;
 	struct pneigh_entry	**phash_buckets;
 };
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index c6f9ad8e4c7a..e587e6819698 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -692,75 +692,74 @@ static void neigh_connect(struct neighbour *neigh)
 		hh->hh_output = neigh->ops->hh_output;
 }
 
-static void neigh_periodic_timer(unsigned long arg)
+static void neigh_periodic_work(struct work_struct *work)
 {
-	struct neigh_table *tbl = (struct neigh_table *)arg;
+	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 	struct neighbour *n, **np;
-	unsigned long expire, now = jiffies;
+	unsigned int i;
 
 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 
-	write_lock(&tbl->lock);
+	write_lock_bh(&tbl->lock);
 
 	/*
 	 *	periodically recompute ReachableTime from random function
 	 */
 
-	if (time_after(now, tbl->last_rand + 300 * HZ)) {
+	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 		struct neigh_parms *p;
-		tbl->last_rand = now;
+		tbl->last_rand = jiffies;
 		for (p = &tbl->parms; p; p = p->next)
 			p->reachable_time =
 				neigh_rand_reach_time(p->base_reachable_time);
 	}
 
-	np = &tbl->hash_buckets[tbl->hash_chain_gc];
-	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
+	for (i = 0 ; i <= tbl->hash_mask; i++) {
+		np = &tbl->hash_buckets[i];
 
-	while ((n = *np) != NULL) {
-		unsigned int state;
+		while ((n = *np) != NULL) {
+			unsigned int state;
 
-		write_lock(&n->lock);
+			write_lock(&n->lock);
 
-		state = n->nud_state;
-		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
-			write_unlock(&n->lock);
-			goto next_elt;
-		}
+			state = n->nud_state;
+			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
+				write_unlock(&n->lock);
+				goto next_elt;
+			}
 
-		if (time_before(n->used, n->confirmed))
-			n->used = n->confirmed;
+			if (time_before(n->used, n->confirmed))
+				n->used = n->confirmed;
 
-		if (atomic_read(&n->refcnt) == 1 &&
-		    (state == NUD_FAILED ||
-		     time_after(now, n->used + n->parms->gc_staletime))) {
-			*np = n->next;
-			n->dead = 1;
+			if (atomic_read(&n->refcnt) == 1 &&
+			    (state == NUD_FAILED ||
+			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
+				*np = n->next;
+				n->dead = 1;
+				write_unlock(&n->lock);
+				neigh_cleanup_and_release(n);
+				continue;
+			}
 			write_unlock(&n->lock);
-			neigh_cleanup_and_release(n);
-			continue;
-		}
-		write_unlock(&n->lock);
 
 next_elt:
-		np = &n->next;
+			np = &n->next;
+		}
+		/*
+		 * It's fine to release lock here, even if hash table
+		 * grows while we are preempted.
+		 */
+		write_unlock_bh(&tbl->lock);
+		cond_resched();
+		write_lock_bh(&tbl->lock);
 	}
-
 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
 	 * base_reachable_time.
 	 */
-	expire = tbl->parms.base_reachable_time >> 1;
-	expire /= (tbl->hash_mask + 1);
-	if (!expire)
-		expire = 1;
-
-	if (expire>HZ)
-		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
-	else
-		mod_timer(&tbl->gc_timer, now + expire);
-
-	write_unlock(&tbl->lock);
+	schedule_delayed_work(&tbl->gc_work,
+			      tbl->parms.base_reachable_time >> 1);
+	write_unlock_bh(&tbl->lock);
 }
 
 static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -1442,10 +1441,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 
 	rwlock_init(&tbl->lock);
-	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
-	tbl->gc_timer.expires = now + 1;
-	add_timer(&tbl->gc_timer);
-
+	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
+	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
 	skb_queue_head_init_class(&tbl->proxy_queue,
 			&neigh_table_proxy_queue_class);
@@ -1482,7 +1479,8 @@ int neigh_table_clear(struct neigh_table *tbl)
 	struct neigh_table **tp;
 
 	/* It is not clean... Fix it to unload IPv6 module safely */
-	del_timer_sync(&tbl->gc_timer);
+	cancel_delayed_work(&tbl->gc_work);
+	flush_scheduled_work();
 	del_timer_sync(&tbl->proxy_timer);
 	pneigh_queue_purge(&tbl->proxy_queue);
 	neigh_ifdown(tbl, NULL);
@@ -1752,7 +1750,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
 			.ndtc_hash_rnd		= tbl->hash_rnd,
 			.ndtc_hash_mask		= tbl->hash_mask,
-			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
 		};
 
-- 
2.20.1
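For reference, here is a minimal, self-contained sketch of the pattern the
patch adopts: a deferrable delayed work item that scans a whole hash table in
process context, dropping the table lock between buckets so the scheduler can
run. The names my_table, my_gc_work, my_table_init, my_table_destroy and the
fixed 15 * HZ period are illustrative placeholders only; the real code re-arms
with tbl->parms.base_reachable_time >> 1 as shown above. API names are those
of the 2.6.31-era kernel this patch targets (INIT_DELAYED_WORK_DEFERRABLE was
later renamed INIT_DEFERRABLE_WORK).

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_table {
	rwlock_t		lock;
	unsigned int		hash_mask;
	struct delayed_work	gc_work;
	/* ... hash buckets, entry counts ... */
};

static void my_gc_work(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence gc_work.work */
	struct my_table *tbl = container_of(work, struct my_table,
					    gc_work.work);
	unsigned int i;

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		/* ... age out the entries hanging off bucket i ... */

		/*
		 * Unlike a timer handler, this runs in process context:
		 * we may drop the lock and reschedule between buckets
		 * instead of holding it for the whole scan.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
	}
	/*
	 * Re-arm. Because the work is deferrable, an idle CPU is not
	 * woken just for this; the period is a soft target.
	 */
	schedule_delayed_work(&tbl->gc_work, 15 * HZ);
	write_unlock_bh(&tbl->lock);
}

static void my_table_init(struct my_table *tbl)
{
	rwlock_init(&tbl->lock);
	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, my_gc_work);
	schedule_delayed_work(&tbl->gc_work, 15 * HZ);
}

static void my_table_destroy(struct my_table *tbl)
{
	/* stop a pending run, then wait out any in-flight one */
	cancel_delayed_work(&tbl->gc_work);
	flush_scheduled_work();
}

The same teardown ordering appears in the patch's neigh_table_clear():
cancel_delayed_work() removes a queued-but-not-yet-running work item, and
flush_scheduled_work() waits for one that has already started, so the table
cannot go away under a running scan.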