__u8 encap_limit; /* tunnel encapsulation limit */
} __packed;
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
void ip6_tnl_dst_reset(struct ip6_tnl *t);
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		    const struct in6_addr *raddr);
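Editorial note, not part of the patch: after this hunk (apparently in include/net/ip6_tunnel.h) the dst-cache helpers read as a conventional get/set/reset triple. A sketch of the resulting API surface, with our gloss on each helper's role:

    struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);           /* return the cached route, or NULL on a miss */
    void ip6_tnl_dst_reset(struct ip6_tnl *t);                      /* invalidate: drop any cached route */
    void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); /* replace the cached route with dst */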
}
if (!fl6->flowi6_mark)
- dst = ip6_tnl_dst_check(tunnel);
+ dst = ip6_tnl_dst_get(tunnel);
if (!dst) {
ndst = ip6_route_output(net, NULL, fl6);
ip6tunnel_xmit(NULL, skb, dev);
if (ndst)
- ip6_tnl_dst_store(tunnel, ndst);
+ ip6_tnl_dst_set(tunnel, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
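The hunk above (apparently the GRE transmit path) elides several context lines between the route lookup and the transmit. As a hedged sketch, the caching pattern it follows is the usual get-or-lookup-then-set; `net`, `fl6`, `tunnel`, and the `tx_err_link_failure` label come from the surrounding function, and the error check is our assumption about the elided lines:

    struct dst_entry *dst = NULL, *ndst = NULL;

    if (!fl6->flowi6_mark)
            dst = ip6_tnl_dst_get(tunnel);           /* fast path: reuse the cached route */
    if (!dst) {
            ndst = ip6_route_output(net, NULL, fl6); /* slow path: full routing lookup */
            if (ndst->error)                         /* assumed error check, elided by the hunk */
                    goto tx_err_link_failure;
            dst = ndst;
    }
    /* ... encapsulation and ip6tunnel_xmit(NULL, skb, dev) happen here ... */
    if (ndst)
            ip6_tnl_dst_set(tunnel, ndst);           /* cache only freshly looked-up routes */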
* Locking : hash tables are protected by RCU and RTNL
*/
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
{
struct dst_entry *dst = t->dst_cache;
return dst;
}
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
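The getter's hunk keeps only its first and last statements as context. A sketch of the full body as it stood in this era, assuming the rename leaves the body untouched: the cached entry is revalidated against the cookie saved at set time and dropped when the routing state has changed.

    struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
    {
            struct dst_entry *dst = t->dst_cache;

            /* dst->obsolete is set when routes change; ops->check() compares
             * the entry against the cookie recorded by ip6_tnl_dst_set() */
            if (dst && dst->obsolete &&
                !dst->ops->check(dst, t->dst_cookie)) {
                    t->dst_cache = NULL;
                    dst_release(dst);
                    return NULL;
            }

            return dst;
    }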
void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
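ip6_tnl_dst_reset() keeps its name, and its body is fully elided here. A sketch of what the helper does at this point in history, assuming the single-slot dst_cache field:

    void ip6_tnl_dst_reset(struct ip6_tnl *t)
    {
            /* drop the cached route; the next transmit re-resolves it */
            dst_release(t->dst_cache);
            t->dst_cache = NULL;
    }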
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *) dst;
t->dst_cookie = rt6_get_cookie(rt);
dst_release(t->dst_cache);
t->dst_cache = dst;
}
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
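Design note on the setter: rt6_get_cookie() records, in this era, a cookie derived from the route's fib6 node serial number, which is what ip6_tnl_dst_get() later feeds to dst->ops->check() to detect stale entries; releasing the old cache entry before storing the new one keeps the refcounts balanced. A hedged usage sketch, with names taken from the callers above and the error-path release being our addition:

    struct dst_entry *ndst = ip6_route_output(net, NULL, fl6);

    if (!ndst->error)
            ip6_tnl_dst_set(t, ndst);   /* saves the cookie, releases the old cache entry */
    else
            dst_release(ndst);          /* our addition: error routes still hold a reference */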
/**
* ip6_tnl_lookup - fetch tunnel matching the end-point addresses
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
} else if (!fl6->flowi6_mark)
- dst = ip6_tnl_dst_check(t);
+ dst = ip6_tnl_dst_get(t);
if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
goto tx_err_link_failure;
ipv6h->daddr = fl6->daddr;
ip6tunnel_xmit(NULL, skb, dev);
if (ndst)
- ip6_tnl_dst_store(t, ndst);
+ ip6_tnl_dst_set(t, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
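The same rename applies in what is apparently ip6_tnl_xmit2(). Worth noting from the visible lines: the `if (ndst)` guard means a cache hit from ip6_tnl_dst_get() is never stored back, so the hot path does not churn the cache. A minimal editorial sketch of the invariant:

    /* ndst is non-NULL only on a cache miss, i.e. when ip6_route_output() ran */
    if (ndst)
            ip6_tnl_dst_set(t, ndst);   /* a cache hit leaves t->dst_cache untouched */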