**/
static int
-ip6_tnl_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
int *type, int *code, int *msg, __be32 *info, int offset)
{
struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
if ((t = ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
goto out;
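+ /* Ignore tunnels configured for a different inner protocol;
+  * parms.proto == 0 matches any protocol.
+  */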
+ if (t->parms.proto != ipproto && t->parms.proto != 0)
+ goto out;
+
err = 0;
switch (*type) {
struct flowi fl;
struct rtable *rt;
- err = ip6_tnl_err(skb, opt, &rel_type, &rel_code, &rel_msg, &rel_info,
- offset);
+ err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
+ &rel_msg, &rel_info, offset);
if (err < 0)
return err;
__u32 rel_info = info;
int err;
- err = ip6_tnl_err(skb, opt, &rel_type, &rel_code, &rel_msg, &rel_info,
- offset);
+ err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
+ &rel_msg, &rel_info, offset);
if (err < 0)
return err;
**/
static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
+ __u8 ipproto,
void (*dscp_ecn_decapsulate)(struct ip6_tnl *t,
struct ipv6hdr *ipv6h,
struct sk_buff *skb))
read_lock(&ip6_tnl_lock);
if ((t = ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
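+ /* Discard packets whose inner protocol does not match the
+  * tunnel configuration; parms.proto == 0 accepts any protocol.
+  */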
+ if (t->parms.proto != ipproto && t->parms.proto != 0) {
+ read_unlock(&ip6_tnl_lock);
+ goto discard;
+ }
+
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
read_unlock(&ip6_tnl_lock);
goto discard;
static int ip4ip6_rcv(struct sk_buff *skb)
{
- return ip6_tnl_rcv(skb, ETH_P_IP, ip4ip6_dscp_ecn_decapsulate);
+ return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
+ ip4ip6_dscp_ecn_decapsulate);
}
static int ip6ip6_rcv(struct sk_buff *skb)
{
- return ip6_tnl_rcv(skb, ETH_P_IPV6, ip6ip6_dscp_ecn_decapsulate);
+ return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
+ ip6ip6_dscp_ecn_decapsulate);
}
struct ipv6_tel_txoption {
__u32 mtu;
int err;
- if (!ip6_tnl_xmit_ctl(t))
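+ /* Transmit only if the tunnel carries IPv4 (or any protocol)
+  * and ip6_tnl_xmit_ctl() permits transmission.
+  */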
+ if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
+ !ip6_tnl_xmit_ctl(t))
return -1;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
__u32 mtu;
int err;
- if (!ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
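+ /* Transmit only if the tunnel carries IPv6 (or any protocol),
+  * ip6_tnl_xmit_ctl() permits transmission and the inner header
+  * does not conflict with the tunnel's own addresses.
+  */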
+ if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+ !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
return -1;
if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
t->parms.encap_limit = p->encap_limit;
t->parms.flowinfo = p->flowinfo;
t->parms.link = p->link;
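+ /* Copy the configured inner protocol as well. */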
+ t->parms.proto = p->proto;
ip6_tnl_dst_reset(t);
ip6_tnl_link_config(t);
return 0;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
break;
err = -EINVAL;
- if (p.proto != IPPROTO_IPV6)
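+ /* Accept IPv6-in-IPv6, IPv4-in-IPv6 or 0 (any protocol) tunnels. */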
+ if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
+ p.proto != 0)
break;
t = ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL);
if (dev != ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- t->fl.proto = IPPROTO_IPV6;
t->dev = dev;
strcpy(t->parms.name, dev->name);
}
{
struct ip6_tnl *t = netdev_priv(dev);
ip6_tnl_dev_init_gen(dev);
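+ /* The fallback tunnel device remains IPv6-in-IPv6 only. */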
+ t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
tnls_wc[0] = t;
return 0;