struct ip6_tnl **tnls[2];
};
-/* lock for the tunnel lists */
-static DEFINE_RWLOCK(ip6_tnl_lock);
+/*
+ * Locking: hash tables are protected by RCU for readers and by the
+ * ip6_tnl_lock spinlock for writers
+ */
+static DEFINE_SPINLOCK(ip6_tnl_lock);
static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
* else %NULL
**/
+#define for_each_ip6_tunnel_rcu(start) \
+ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
{
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) {
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
ipv6_addr_equal(remote, &t->parms.raddr) &&
(t->dev->flags & IFF_UP))
return t;
}
- if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
+ t = rcu_dereference(ip6n->tnls_wc[0]);
+ if (t && (t->dev->flags & IFF_UP))
return t;
return NULL;
{
struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
+ spin_lock_bh(&ip6_tnl_lock);
t->next = *tp;
- write_lock_bh(&ip6_tnl_lock);
- *tp = t;
- write_unlock_bh(&ip6_tnl_lock);
+ rcu_assign_pointer(*tp, t);
+ spin_unlock_bh(&ip6_tnl_lock);
}
/**
for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
if (t == *tp) {
- write_lock_bh(&ip6_tnl_lock);
+ spin_lock_bh(&ip6_tnl_lock);
*tp = t->next;
- write_unlock_bh(&ip6_tnl_lock);
+ spin_unlock_bh(&ip6_tnl_lock);
break;
}
}
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev) {
- write_lock_bh(&ip6_tnl_lock);
+ spin_lock_bh(&ip6_tnl_lock);
ip6n->tnls_wc[0] = NULL;
- write_unlock_bh(&ip6_tnl_lock);
+ spin_unlock_bh(&ip6_tnl_lock);
} else {
ip6_tnl_unlink(ip6n, t);
}
in trouble since we might need the source address for further
processing of the error. */
- read_lock(&ip6_tnl_lock);
+ rcu_read_lock();
if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
&ipv6h->saddr)) == NULL)
goto out;
*msg = rel_msg;
out:
- read_unlock(&ip6_tnl_lock);
+ rcu_read_unlock();
return err;
}
struct ip6_tnl *t;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- read_lock(&ip6_tnl_lock);
+ rcu_read_lock();
if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
&ipv6h->daddr)) != NULL) {
if (t->parms.proto != ipproto && t->parms.proto != 0) {
- read_unlock(&ip6_tnl_lock);
+ rcu_read_unlock();
goto discard;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- read_unlock(&ip6_tnl_lock);
+ rcu_read_unlock();
goto discard;
}
if (!ip6_tnl_rcv_ctl(t)) {
t->dev->stats.rx_dropped++;
- read_unlock(&ip6_tnl_lock);
+ rcu_read_unlock();
goto discard;
}
secpath_reset(skb);
t->dev->stats.rx_packets++;
t->dev->stats.rx_bytes += skb->len;
netif_rx(skb);
- read_unlock(&ip6_tnl_lock);
+ rcu_read_unlock();
return 0;
}
- read_unlock(&ip6_tnl_lock);
+ rcu_read_unlock();
return 1;
discard: