netdev_hw_addr_list_for_each(ha, &(dev)->mc)
struct hh_cache {
- struct hh_cache *hh_next; /* Next entry */
atomic_t hh_refcnt; /* number of users */
/*
* We want hh_output, hh_len, hh_lock and hh_data to be in a separate cache line.
* They are mostly read, but hh_refcnt may be changed quite frequently,
* incurring cache line ping pongs.
*/
- __be16 hh_type ____cacheline_aligned_in_smp;
- /* protocol identifier, f.e ETH_P_IP
- * NOTE: For VLANs, this will be the
- * encapuslated type. --BLG
- */
- u16 hh_len; /* length of header */
+ u16 hh_len ____cacheline_aligned_in_smp;
+ u16 __pad;
int (*hh_output)(struct sk_buff *skb);
seqlock_t hh_lock;
if (neigh_del_timer(neigh))
printk(KERN_WARNING "Impossible event.\n");
- while ((hh = neigh->hh) != NULL) {
- neigh->hh = hh->hh_next;
- hh->hh_next = NULL;
+ hh = neigh->hh;
+ if (hh) {
+ neigh->hh = NULL;
write_seqlock_bh(&hh->hh_lock);
hh->hh_output = neigh_blackhole;
neigh->output = neigh->ops->output;
- for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh = neigh->hh;
+ if (hh)
hh->hh_output = neigh->ops->output;
}
neigh->output = neigh->ops->connected_output;
- for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh = neigh->hh;
+ if (hh)
hh->hh_output = neigh->ops->hh_output;
}
update = neigh->dev->header_ops->cache_update;
if (update) {
- for (hh = neigh->hh; hh; hh = hh->hh_next) {
+ hh = neigh->hh;
+ if (hh) {
write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock);
}
EXPORT_SYMBOL(neigh_event_ns);
-static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst,
- __be16 protocol)
+static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst)
{
struct hh_cache *hh;
smp_rmb(); /* paired with smp_wmb() in neigh_hh_init() */
- for (hh = n->hh; hh; hh = hh->hh_next) {
- if (hh->hh_type == protocol) {
- atomic_inc(&hh->hh_refcnt);
- if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
- hh_cache_put(hh);
- return true;
- }
+ hh = n->hh;
+ if (hh) {
+ atomic_inc(&hh->hh_refcnt);
+ if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
+ hh_cache_put(hh);
+ return true;
}
return false;
}
struct hh_cache *hh;
struct net_device *dev = dst->dev;
- if (likely(neigh_hh_lookup(n, dst, protocol)))
+ if (likely(neigh_hh_lookup(n, dst)))
return;
/* slow path */
return;
seqlock_init(&hh->hh_lock);
- hh->hh_type = protocol;
atomic_set(&hh->hh_refcnt, 2);
if (dev->header_ops->cache(n, hh, protocol)) {
write_lock_bh(&n->lock);
/* must check if another thread already did the insert */
- if (neigh_hh_lookup(n, dst, protocol)) {
+ if (neigh_hh_lookup(n, dst)) {
kfree(hh);
goto end;
}
else
hh->hh_output = n->ops->output;
- hh->hh_next = n->hh;
smp_wmb(); /* paired with smp_rmb() in neigh_hh_lookup() */
n->hh = hh;