ipv6: Add redirect support to all protocol icmp error handlers.
net/ipv6/ip6_tunnel.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 * IPv6 tunneling device
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 *
9 * Based on:
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11 *
12 * RFC 2473
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/if_tunnel.h>
33 #include <linux/net.h>
34 #include <linux/in6.h>
35 #include <linux/netdevice.h>
36 #include <linux/if_arp.h>
37 #include <linux/icmpv6.h>
38 #include <linux/init.h>
39 #include <linux/route.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/netfilter_ipv6.h>
42 #include <linux/slab.h>
43
44 #include <asm/uaccess.h>
45 #include <linux/atomic.h>
46
47 #include <net/icmp.h>
48 #include <net/ip.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_route.h>
51 #include <net/addrconf.h>
52 #include <net/ip6_tunnel.h>
53 #include <net/xfrm.h>
54 #include <net/dsfield.h>
55 #include <net/inet_ecn.h>
56 #include <net/net_namespace.h>
57 #include <net/netns/generic.h>
58
59 MODULE_AUTHOR("Ville Nuorvala");
60 MODULE_DESCRIPTION("IPv6 tunneling device");
61 MODULE_LICENSE("GPL");
62 MODULE_ALIAS_NETDEV("ip6tnl0");
63
64 #ifdef IP6_TNL_DEBUG
65 #define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
66 #else
67 #define IP6_TNL_TRACE(x...) do {;} while(0)
68 #endif
69
70 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
71 #define IPV6_TCLASS_SHIFT 20
72
73 #define HASH_SIZE 32
74
75 #define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
76 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
77 (HASH_SIZE - 1))
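/*
 * Worked example of the bucket computation above: HASH() folds the four
 * 32-bit words of an address by XOR and masks to HASH_SIZE - 1, and the
 * lookup/bucket helpers below XOR the two endpoint hashes together.
 * For remote 2001:db8::1 and local 2001:db8::2 (illustrative addresses):
 *
 *   HASH(remote) = (0x20010db8 ^ 0 ^ 0 ^ 0x1) & 31 = 0x20010db9 & 31 = 25
 *   HASH(local)  = (0x20010db8 ^ 0 ^ 0 ^ 0x2) & 31 = 0x20010dba & 31 = 26
 *   bucket index = 25 ^ 26 = 3
 */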
78
79 static int ip6_tnl_dev_init(struct net_device *dev);
80 static void ip6_tnl_dev_setup(struct net_device *dev);
81
82 static int ip6_tnl_net_id __read_mostly;
83 struct ip6_tnl_net {
84 /* the IPv6 tunnel fallback device */
85 struct net_device *fb_tnl_dev;
86 /* lists for storing tunnels in use */
87 struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
88 struct ip6_tnl __rcu *tnls_wc[1];
89 struct ip6_tnl __rcu **tnls[2];
90 };
91
92 /* often modified stats are per cpu, others are shared (netdev->stats) */
93 struct pcpu_tstats {
94 unsigned long rx_packets;
95 unsigned long rx_bytes;
96 unsigned long tx_packets;
97 unsigned long tx_bytes;
98 } __attribute__((aligned(4*sizeof(unsigned long))));
99
100 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
101 {
102 struct pcpu_tstats sum = { 0 };
103 int i;
104
105 for_each_possible_cpu(i) {
106 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
107
108 sum.rx_packets += tstats->rx_packets;
109 sum.rx_bytes += tstats->rx_bytes;
110 sum.tx_packets += tstats->tx_packets;
111 sum.tx_bytes += tstats->tx_bytes;
112 }
113 dev->stats.rx_packets = sum.rx_packets;
114 dev->stats.rx_bytes = sum.rx_bytes;
115 dev->stats.tx_packets = sum.tx_packets;
116 dev->stats.tx_bytes = sum.tx_bytes;
117 return &dev->stats;
118 }
119
120 /*
121 * Locking : hash tables are protected by RCU and RTNL
122 */
123
124 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
125 {
126 struct dst_entry *dst = t->dst_cache;
127
128 if (dst && dst->obsolete &&
129 dst->ops->check(dst, t->dst_cookie) == NULL) {
130 t->dst_cache = NULL;
131 dst_release(dst);
132 return NULL;
133 }
134
135 return dst;
136 }
137
138 static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
139 {
140 dst_release(t->dst_cache);
141 t->dst_cache = NULL;
142 }
143
144 static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
145 {
146 struct rt6_info *rt = (struct rt6_info *) dst;
147 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
148 dst_release(t->dst_cache);
149 t->dst_cache = dst;
150 }
151
152 /**
153 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
154 * @remote: the address of the tunnel exit-point
155 * @local: the address of the tunnel entry-point
156 *
157 * Return:
158 * tunnel matching given end-points if found,
159 * else fallback tunnel if its device is up,
160 * else %NULL
161 **/
162
163 #define for_each_ip6_tunnel_rcu(start) \
164 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
165
166 static struct ip6_tnl *
167 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
168 {
169 unsigned int h0 = HASH(remote);
170 unsigned int h1 = HASH(local);
171 struct ip6_tnl *t;
172 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
173
174 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
175 if (ipv6_addr_equal(local, &t->parms.laddr) &&
176 ipv6_addr_equal(remote, &t->parms.raddr) &&
177 (t->dev->flags & IFF_UP))
178 return t;
179 }
180 t = rcu_dereference(ip6n->tnls_wc[0]);
181 if (t && (t->dev->flags & IFF_UP))
182 return t;
183
184 return NULL;
185 }
186
187 /**
188 * ip6_tnl_bucket - get head of list matching given tunnel parameters
189 * @p: parameters containing tunnel end-points
190 *
191 * Description:
192 * ip6_tnl_bucket() returns the head of the list matching the
193 * &struct in6_addr entries laddr and raddr in @p.
194 *
195 * Return: head of IPv6 tunnel list
196 **/
197
198 static struct ip6_tnl __rcu **
199 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
200 {
201 const struct in6_addr *remote = &p->raddr;
202 const struct in6_addr *local = &p->laddr;
203 unsigned int h = 0;
204 int prio = 0;
205
206 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
207 prio = 1;
208 h = HASH(remote) ^ HASH(local);
209 }
210 return &ip6n->tnls[prio][h];
211 }
212
213 /**
214 * ip6_tnl_link - add tunnel to hash table
215 * @t: tunnel to be added
216 **/
217
218 static void
219 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
220 {
221 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
222
223 rcu_assign_pointer(t->next , rtnl_dereference(*tp));
224 rcu_assign_pointer(*tp, t);
225 }
226
227 /**
228 * ip6_tnl_unlink - remove tunnel from hash table
229 * @t: tunnel to be removed
230 **/
231
232 static void
233 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
234 {
235 struct ip6_tnl __rcu **tp;
236 struct ip6_tnl *iter;
237
238 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
239 (iter = rtnl_dereference(*tp)) != NULL;
240 tp = &iter->next) {
241 if (t == iter) {
242 rcu_assign_pointer(*tp, t->next);
243 break;
244 }
245 }
246 }
247
248 static void ip6_dev_free(struct net_device *dev)
249 {
250 free_percpu(dev->tstats);
251 free_netdev(dev);
252 }
253
254 /**
255 * ip6_tnl_create - create a new tunnel
256 * @net: network namespace in which to create the tunnel
257 * @p: tunnel parameters
258 *
259 * Description:
260 * Create tunnel matching given parameters.
261 *
262 * Return:
263 * created tunnel or NULL
264 **/
265
266 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
267 {
268 struct net_device *dev;
269 struct ip6_tnl *t;
270 char name[IFNAMSIZ];
271 int err;
272 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
273
274 if (p->name[0])
275 strlcpy(name, p->name, IFNAMSIZ);
276 else
277 sprintf(name, "ip6tnl%%d");
278
279 dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
280 if (dev == NULL)
281 goto failed;
282
283 dev_net_set(dev, net);
284
285 t = netdev_priv(dev);
286 t->parms = *p;
287 err = ip6_tnl_dev_init(dev);
288 if (err < 0)
289 goto failed_free;
290
291 if ((err = register_netdevice(dev)) < 0)
292 goto failed_free;
293
294 strcpy(t->parms.name, dev->name);
295
296 dev_hold(dev);
297 ip6_tnl_link(ip6n, t);
298 return t;
299
300 failed_free:
301 ip6_dev_free(dev);
302 failed:
303 return NULL;
304 }
305
306 /**
307 * ip6_tnl_locate - find or create tunnel matching given parameters
308 * @p: tunnel parameters
309 * @create: != 0 to create a new tunnel when no match is found
310 *
311 * Description:
312 * ip6_tnl_locate() first tries to locate an existing tunnel
313 * based on @p. If this is unsuccessful, but @create is set, a new
314 * tunnel device is created and registered for use.
315 *
316 * Return:
317 * matching tunnel or NULL
318 **/
319
320 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
321 struct ip6_tnl_parm *p, int create)
322 {
323 const struct in6_addr *remote = &p->raddr;
324 const struct in6_addr *local = &p->laddr;
325 struct ip6_tnl __rcu **tp;
326 struct ip6_tnl *t;
327 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
328
329 for (tp = ip6_tnl_bucket(ip6n, p);
330 (t = rtnl_dereference(*tp)) != NULL;
331 tp = &t->next) {
332 if (ipv6_addr_equal(local, &t->parms.laddr) &&
333 ipv6_addr_equal(remote, &t->parms.raddr))
334 return t;
335 }
336 if (!create)
337 return NULL;
338 return ip6_tnl_create(net, p);
339 }
340
341 /**
342 * ip6_tnl_dev_uninit - tunnel device uninitializer
343 * @dev: the device to be destroyed
344 *
345 * Description:
346 * ip6_tnl_dev_uninit() removes tunnel from its list
347 **/
348
349 static void
350 ip6_tnl_dev_uninit(struct net_device *dev)
351 {
352 struct ip6_tnl *t = netdev_priv(dev);
353 struct net *net = dev_net(dev);
354 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
355
356 if (dev == ip6n->fb_tnl_dev)
357 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
358 else
359 ip6_tnl_unlink(ip6n, t);
360 ip6_tnl_dst_reset(t);
361 dev_put(dev);
362 }
363
364 /**
365 * parse_tlv_tnl_enc_lim - handle encapsulation limit option
366 * @skb: received socket buffer
367 *
368 * Return:
369 * 0 if none was found,
370 * else index to encapsulation limit
371 **/
372
373 static __u16
374 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
375 {
376 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
377 __u8 nexthdr = ipv6h->nexthdr;
378 __u16 off = sizeof (*ipv6h);
379
380 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
381 __u16 optlen = 0;
382 struct ipv6_opt_hdr *hdr;
383 if (raw + off + sizeof (*hdr) > skb->data &&
384 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
385 break;
386
387 hdr = (struct ipv6_opt_hdr *) (raw + off);
388 if (nexthdr == NEXTHDR_FRAGMENT) {
389 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
390 if (frag_hdr->frag_off)
391 break;
392 optlen = 8;
393 } else if (nexthdr == NEXTHDR_AUTH) {
394 optlen = (hdr->hdrlen + 2) << 2;
395 } else {
396 optlen = ipv6_optlen(hdr);
397 }
398 if (nexthdr == NEXTHDR_DEST) {
399 __u16 i = off + 2;
400 while (1) {
401 struct ipv6_tlv_tnl_enc_lim *tel;
402
403 /* No more room for encapsulation limit */
404 if (i + sizeof (*tel) > off + optlen)
405 break;
406
407 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
408 /* return index of option if found and valid */
409 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
410 tel->length == 1)
411 return i;
412 /* else jump to next option */
413 if (tel->type)
414 i += tel->length + 2;
415 else
416 i++;
417 }
418 }
419 nexthdr = hdr->nexthdr;
420 off += optlen;
421 }
422 return 0;
423 }
424
425 /**
426 * ip6_tnl_err - tunnel error handler
427 *
428 * Description:
429 * ip6_tnl_err() should handle errors in the tunnel according
430 * to the specifications in RFC 2473.
431 **/
432
433 static int
434 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
435 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
436 {
437 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
438 struct ip6_tnl *t;
439 int rel_msg = 0;
440 u8 rel_type = ICMPV6_DEST_UNREACH;
441 u8 rel_code = ICMPV6_ADDR_UNREACH;
442 __u32 rel_info = 0;
443 __u16 len;
444 int err = -ENOENT;
445
446 /* If the packet doesn't contain the original IPv6 header we are
447 in trouble since we might need the source address for further
448 processing of the error. */
449
450 rcu_read_lock();
451 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
452 &ipv6h->saddr)) == NULL)
453 goto out;
454
455 if (t->parms.proto != ipproto && t->parms.proto != 0)
456 goto out;
457
458 err = 0;
459
460 switch (*type) {
461 __u32 teli;
462 struct ipv6_tlv_tnl_enc_lim *tel;
463 __u32 mtu;
464 case ICMPV6_DEST_UNREACH:
465 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
466 t->parms.name);
467 rel_msg = 1;
468 break;
469 case ICMPV6_TIME_EXCEED:
470 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
471 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
472 t->parms.name);
473 rel_msg = 1;
474 }
475 break;
476 case ICMPV6_PARAMPROB:
477 teli = 0;
478 if ((*code) == ICMPV6_HDR_FIELD)
479 teli = parse_tlv_tnl_enc_lim(skb, skb->data);
480
481 if (teli && teli == *info - 2) {
482 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
483 if (tel->encap_limit == 0) {
484 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
485 t->parms.name);
486 rel_msg = 1;
487 }
488 } else {
489 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
490 t->parms.name);
491 }
492 break;
493 case ICMPV6_PKT_TOOBIG:
494 mtu = *info - offset;
495 if (mtu < IPV6_MIN_MTU)
496 mtu = IPV6_MIN_MTU;
497 t->dev->mtu = mtu;
498
499 if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
500 rel_type = ICMPV6_PKT_TOOBIG;
501 rel_code = 0;
502 rel_info = mtu;
503 rel_msg = 1;
504 }
505 break;
506 }
507
508 *type = rel_type;
509 *code = rel_code;
510 *info = rel_info;
511 *msg = rel_msg;
512
513 out:
514 rcu_read_unlock();
515 return err;
516 }
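/*
 * In short: destination-unreachable errors on the tunnel header are always
 * relayed to the original sender; time-exceeded only when the hop limit ran
 * out inside the tunnel; parameter-problem only when it points at a zero
 * tunnel encapsulation limit option; and packet-too-big first shrinks the
 * tunnel MTU and is relayed only if the inner packet no longer fits
 * (RFC 2473 error reporting and processing).
 */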
517
518 static int
519 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
520 u8 type, u8 code, int offset, __be32 info)
521 {
522 int rel_msg = 0;
523 u8 rel_type = type;
524 u8 rel_code = code;
525 __u32 rel_info = ntohl(info);
526 int err;
527 struct sk_buff *skb2;
528 const struct iphdr *eiph;
529 struct rtable *rt;
530 struct flowi4 fl4;
531
532 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
533 &rel_msg, &rel_info, offset);
534 if (err < 0)
535 return err;
536
537 if (rel_msg == 0)
538 return 0;
539
540 switch (rel_type) {
541 case ICMPV6_DEST_UNREACH:
542 if (rel_code != ICMPV6_ADDR_UNREACH)
543 return 0;
544 rel_type = ICMP_DEST_UNREACH;
545 rel_code = ICMP_HOST_UNREACH;
546 break;
547 case ICMPV6_PKT_TOOBIG:
548 if (rel_code != 0)
549 return 0;
550 rel_type = ICMP_DEST_UNREACH;
551 rel_code = ICMP_FRAG_NEEDED;
552 break;
553 case NDISC_REDIRECT:
554 rel_type = ICMP_REDIRECT;
555 rel_code = ICMP_REDIR_HOST;
break;
556 default:
557 return 0;
558 }
559
560 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
561 return 0;
562
563 skb2 = skb_clone(skb, GFP_ATOMIC);
564 if (!skb2)
565 return 0;
566
567 skb_dst_drop(skb2);
568
569 skb_pull(skb2, offset);
570 skb_reset_network_header(skb2);
571 eiph = ip_hdr(skb2);
572
573 /* Try to guess incoming interface */
574 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
575 eiph->saddr, 0,
576 0, 0,
577 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
578 if (IS_ERR(rt))
579 goto out;
580
581 skb2->dev = rt->dst.dev;
582
583 /* route "incoming" packet */
584 if (rt->rt_flags & RTCF_LOCAL) {
585 ip_rt_put(rt);
586 rt = NULL;
587 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
588 eiph->daddr, eiph->saddr,
589 0, 0,
590 IPPROTO_IPIP,
591 RT_TOS(eiph->tos), 0);
592 if (IS_ERR(rt) ||
593 rt->dst.dev->type != ARPHRD_TUNNEL) {
594 if (!IS_ERR(rt))
595 ip_rt_put(rt);
596 goto out;
597 }
598 skb_dst_set(skb2, &rt->dst);
599 } else {
600 ip_rt_put(rt);
601 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
602 skb2->dev) ||
603 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
604 goto out;
605 }
606
607 /* change mtu on this route */
608 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
609 if (rel_info > dst_mtu(skb_dst(skb2)))
610 goto out;
611
612 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
613 }
614 if (rel_type == ICMP_REDIRECT) {
615 if (skb_dst(skb2)->ops->redirect)
616 skb_dst(skb2)->ops->redirect(skb_dst(skb2), skb2);
617 }
618
619 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
620
621 out:
622 kfree_skb(skb2);
623 return 0;
624 }
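/*
 * ICMPv6 (outer tunnel) to ICMPv4 (relayed to the inner IPv4 sender)
 * translation performed by ip4ip6_err() above:
 *
 *   ICMPV6_DEST_UNREACH / ADDR_UNREACH  ->  ICMP_DEST_UNREACH / HOST_UNREACH
 *   ICMPV6_PKT_TOOBIG   / 0             ->  ICMP_DEST_UNREACH / FRAG_NEEDED
 *   NDISC_REDIRECT                      ->  ICMP_REDIRECT     / REDIR_HOST
 *
 * Anything else is silently ignored.
 */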
625
626 static int
627 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
628 u8 type, u8 code, int offset, __be32 info)
629 {
630 int rel_msg = 0;
631 u8 rel_type = type;
632 u8 rel_code = code;
633 __u32 rel_info = ntohl(info);
634 int err;
635
636 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
637 &rel_msg, &rel_info, offset);
638 if (err < 0)
639 return err;
640
641 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
642 struct rt6_info *rt;
643 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
644
645 if (!skb2)
646 return 0;
647
648 skb_dst_drop(skb2);
649 skb_pull(skb2, offset);
650 skb_reset_network_header(skb2);
651
652 /* Try to guess incoming interface */
653 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
654 NULL, 0, 0);
655
656 if (rt && rt->dst.dev)
657 skb2->dev = rt->dst.dev;
658
659 icmpv6_send(skb2, rel_type, rel_code, rel_info);
660
661 if (rt)
662 dst_release(&rt->dst);
663
664 kfree_skb(skb2);
665 }
666
667 return 0;
668 }
669
670 static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
671 const struct ipv6hdr *ipv6h,
672 struct sk_buff *skb)
673 {
674 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
675
676 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
677 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
678
679 if (INET_ECN_is_ce(dsfield))
680 IP_ECN_set_ce(ip_hdr(skb));
681 }
682
683 static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
684 const struct ipv6hdr *ipv6h,
685 struct sk_buff *skb)
686 {
687 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
688 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
689
690 if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
691 IP6_ECN_set_ce(ipv6_hdr(skb));
692 }
693
694 static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
695 const struct in6_addr *laddr,
696 const struct in6_addr *raddr)
697 {
698 struct ip6_tnl_parm *p = &t->parms;
699 int ltype = ipv6_addr_type(laddr);
700 int rtype = ipv6_addr_type(raddr);
701 __u32 flags = 0;
702
703 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
704 flags = IP6_TNL_F_CAP_PER_PACKET;
705 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
706 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
707 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
708 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
709 if (ltype&IPV6_ADDR_UNICAST)
710 flags |= IP6_TNL_F_CAP_XMIT;
711 if (rtype&IPV6_ADDR_UNICAST)
712 flags |= IP6_TNL_F_CAP_RCV;
713 }
714 return flags;
715 }
716
717 /* called with rcu_read_lock() */
718 static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
719 const struct in6_addr *laddr,
720 const struct in6_addr *raddr)
721 {
722 struct ip6_tnl_parm *p = &t->parms;
723 int ret = 0;
724 struct net *net = dev_net(t->dev);
725
726 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
727 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
728 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
729 struct net_device *ldev = NULL;
730
731 if (p->link)
732 ldev = dev_get_by_index_rcu(net, p->link);
733
734 if ((ipv6_addr_is_multicast(laddr) ||
735 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
736 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
737 ret = 1;
738 }
739 return ret;
740 }
741
742 /**
743 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
744 * @skb: received socket buffer
745 * @protocol: ethernet protocol ID
746 * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
747 *
748 * Return: 0
749 **/
750
751 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
752 __u8 ipproto,
753 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
754 const struct ipv6hdr *ipv6h,
755 struct sk_buff *skb))
756 {
757 struct ip6_tnl *t;
758 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
759
760 rcu_read_lock();
761
762 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
763 &ipv6h->daddr)) != NULL) {
764 struct pcpu_tstats *tstats;
765
766 if (t->parms.proto != ipproto && t->parms.proto != 0) {
767 rcu_read_unlock();
768 goto discard;
769 }
770
771 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
772 rcu_read_unlock();
773 goto discard;
774 }
775
776 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
777 t->dev->stats.rx_dropped++;
778 rcu_read_unlock();
779 goto discard;
780 }
781 secpath_reset(skb);
782 skb->mac_header = skb->network_header;
783 skb_reset_network_header(skb);
784 skb->protocol = htons(protocol);
785 skb->pkt_type = PACKET_HOST;
786 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
787
788 tstats = this_cpu_ptr(t->dev->tstats);
789 tstats->rx_packets++;
790 tstats->rx_bytes += skb->len;
791
792 __skb_tunnel_rx(skb, t->dev);
793
794 dscp_ecn_decapsulate(t, ipv6h, skb);
795
796 netif_rx(skb);
797
798 rcu_read_unlock();
799 return 0;
800 }
801 rcu_read_unlock();
802 return 1;
803
804 discard:
805 kfree_skb(skb);
806 return 0;
807 }
808
809 static int ip4ip6_rcv(struct sk_buff *skb)
810 {
811 return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
812 ip4ip6_dscp_ecn_decapsulate);
813 }
814
815 static int ip6ip6_rcv(struct sk_buff *skb)
816 {
817 return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
818 ip6ip6_dscp_ecn_decapsulate);
819 }
820
821 struct ipv6_tel_txoption {
822 struct ipv6_txoptions ops;
823 __u8 dst_opt[8];
824 };
825
826 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
827 {
828 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
829
830 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
831 opt->dst_opt[3] = 1;
832 opt->dst_opt[4] = encap_limit;
833 opt->dst_opt[5] = IPV6_TLV_PADN;
834 opt->dst_opt[6] = 1;
835
836 opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
837 opt->ops.opt_nflen = 8;
838 }
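/*
 * Byte layout of the destination options header built above (RFC 2473
 * tunnel encapsulation limit option, padded to 8 bytes with PadN):
 *
 *   dst_opt[0] = next header   (filled in by ipv6_push_nfrag_opts)
 *   dst_opt[1] = hdr ext len   (0, i.e. 8 bytes total)
 *   dst_opt[2] = option type   IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *   dst_opt[3] = option length (1)
 *   dst_opt[4] = encap_limit value
 *   dst_opt[5] = option type   IPV6_TLV_PADN (1)
 *   dst_opt[6] = option length (1)
 *   dst_opt[7] = padding byte  (0)
 */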
839
840 /**
841 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
842 * @t: the outgoing tunnel device
843 * @hdr: IPv6 header from the incoming packet
844 *
845 * Description:
846 * Avoid trivial tunneling loop by checking that tunnel exit-point
847 * doesn't match source of incoming packet.
848 *
849 * Return:
850 * 1 if conflict,
851 * 0 else
852 **/
853
854 static inline bool
855 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
856 {
857 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
858 }
859
860 static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
861 {
862 struct ip6_tnl_parm *p = &t->parms;
863 int ret = 0;
864 struct net *net = dev_net(t->dev);
865
866 if (p->flags & IP6_TNL_F_CAP_XMIT) {
867 struct net_device *ldev = NULL;
868
869 rcu_read_lock();
870 if (p->link)
871 ldev = dev_get_by_index_rcu(net, p->link);
872
873 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
874 pr_warn("%s xmit: Local address not yet configured!\n",
875 p->name);
876 else if (!ipv6_addr_is_multicast(&p->raddr) &&
877 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
878 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
879 p->name);
880 else
881 ret = 1;
882 rcu_read_unlock();
883 }
884 return ret;
885 }
886 /**
887 * ip6_tnl_xmit2 - encapsulate packet and send
888 * @skb: the outgoing socket buffer
889 * @dev: the outgoing tunnel device
890 * @dsfield: dscp code for outer header
891 * @fl: flow of tunneled packet
892 * @encap_limit: encapsulation limit
893 * @pmtu: Path MTU is stored if packet is too big
894 *
895 * Description:
896 * Build new header and do some sanity checks on the packet before sending
897 * it.
898 *
899 * Return:
900 * 0 on success
901 * -1 on failure
902 * %-EMSGSIZE message too big; the link MTU is returned via @pmtu in this case
903 **/
904
905 static int ip6_tnl_xmit2(struct sk_buff *skb,
906 struct net_device *dev,
907 __u8 dsfield,
908 struct flowi6 *fl6,
909 int encap_limit,
910 __u32 *pmtu)
911 {
912 struct net *net = dev_net(dev);
913 struct ip6_tnl *t = netdev_priv(dev);
914 struct net_device_stats *stats = &t->dev->stats;
915 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
916 struct ipv6_tel_txoption opt;
917 struct dst_entry *dst = NULL, *ndst = NULL;
918 struct net_device *tdev;
919 int mtu;
920 unsigned int max_headroom = sizeof(struct ipv6hdr);
921 u8 proto;
922 int err = -1;
923 int pkt_len;
924
925 if (!fl6->flowi6_mark)
926 dst = ip6_tnl_dst_check(t);
927 if (!dst) {
928 ndst = ip6_route_output(net, NULL, fl6);
929
930 if (ndst->error)
931 goto tx_err_link_failure;
932 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
933 if (IS_ERR(ndst)) {
934 err = PTR_ERR(ndst);
935 ndst = NULL;
936 goto tx_err_link_failure;
937 }
938 dst = ndst;
939 }
940
941 tdev = dst->dev;
942
943 if (tdev == dev) {
944 stats->collisions++;
945 net_warn_ratelimited("%s: Local routing loop detected!\n",
946 t->parms.name);
947 goto tx_err_dst_release;
948 }
949 mtu = dst_mtu(dst) - sizeof (*ipv6h);
950 if (encap_limit >= 0) {
951 max_headroom += 8;
952 mtu -= 8;
953 }
954 if (mtu < IPV6_MIN_MTU)
955 mtu = IPV6_MIN_MTU;
956 if (skb_dst(skb))
957 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
958 if (skb->len > mtu) {
959 *pmtu = mtu;
960 err = -EMSGSIZE;
961 goto tx_err_dst_release;
962 }
963
964 /*
965 * Okay, now see if we can stuff it in the buffer as-is.
966 */
967 max_headroom += LL_RESERVED_SPACE(tdev);
968
969 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
970 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
971 struct sk_buff *new_skb;
972
973 if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
974 goto tx_err_dst_release;
975
976 if (skb->sk)
977 skb_set_owner_w(new_skb, skb->sk);
978 consume_skb(skb);
979 skb = new_skb;
980 }
981 skb_dst_drop(skb);
982 if (fl6->flowi6_mark) {
983 skb_dst_set(skb, dst);
984 ndst = NULL;
985 } else {
986 skb_dst_set_noref(skb, dst);
987 }
988 skb->transport_header = skb->network_header;
989
990 proto = fl6->flowi6_proto;
991 if (encap_limit >= 0) {
992 init_tel_txopt(&opt, encap_limit);
993 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
994 }
995 skb_push(skb, sizeof(struct ipv6hdr));
996 skb_reset_network_header(skb);
997 ipv6h = ipv6_hdr(skb);
998 *(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000);
999 dsfield = INET_ECN_encapsulate(0, dsfield);
1000 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
1001 ipv6h->hop_limit = t->parms.hop_limit;
1002 ipv6h->nexthdr = proto;
1003 ipv6h->saddr = fl6->saddr;
1004 ipv6h->daddr = fl6->daddr;
1005 nf_reset(skb);
1006 pkt_len = skb->len;
1007 err = ip6_local_out(skb);
1008
1009 if (net_xmit_eval(err) == 0) {
1010 struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
1011
1012 tstats->tx_bytes += pkt_len;
1013 tstats->tx_packets++;
1014 } else {
1015 stats->tx_errors++;
1016 stats->tx_aborted_errors++;
1017 }
1018 if (ndst)
1019 ip6_tnl_dst_store(t, ndst);
1020 return 0;
1021 tx_err_link_failure:
1022 stats->tx_carrier_errors++;
1023 dst_link_failure(skb);
1024 tx_err_dst_release:
1025 dst_release(ndst);
1026 return err;
1027 }
1028
1029 static inline int
1030 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1031 {
1032 struct ip6_tnl *t = netdev_priv(dev);
1033 const struct iphdr *iph = ip_hdr(skb);
1034 int encap_limit = -1;
1035 struct flowi6 fl6;
1036 __u8 dsfield;
1037 __u32 mtu;
1038 int err;
1039
1040 if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
1041 !ip6_tnl_xmit_ctl(t))
1042 return -1;
1043
1044 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1045 encap_limit = t->parms.encap_limit;
1046
1047 memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
1048 fl6.flowi6_proto = IPPROTO_IPIP;
1049
1050 dsfield = ipv4_get_dsfield(iph);
1051
1052 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1053 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1054 & IPV6_TCLASS_MASK;
1055 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1056 fl6.flowi6_mark = skb->mark;
1057
1058 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1059 if (err != 0) {
1060 /* XXX: send ICMP error even if DF is not set. */
1061 if (err == -EMSGSIZE)
1062 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1063 htonl(mtu));
1064 return -1;
1065 }
1066
1067 return 0;
1068 }
1069
1070 static inline int
1071 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1072 {
1073 struct ip6_tnl *t = netdev_priv(dev);
1074 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1075 int encap_limit = -1;
1076 __u16 offset;
1077 struct flowi6 fl6;
1078 __u8 dsfield;
1079 __u32 mtu;
1080 int err;
1081
1082 if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
1083 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
1084 return -1;
1085
1086 offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
1087 if (offset > 0) {
1088 struct ipv6_tlv_tnl_enc_lim *tel;
1089 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1090 if (tel->encap_limit == 0) {
1091 icmpv6_send(skb, ICMPV6_PARAMPROB,
1092 ICMPV6_HDR_FIELD, offset + 2);
1093 return -1;
1094 }
1095 encap_limit = tel->encap_limit - 1;
1096 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1097 encap_limit = t->parms.encap_limit;
1098
1099 memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
1100 fl6.flowi6_proto = IPPROTO_IPV6;
1101
1102 dsfield = ipv6_get_dsfield(ipv6h);
1103 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1104 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
1105 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1106 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
1107 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1108 fl6.flowi6_mark = skb->mark;
1109
1110 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1111 if (err != 0) {
1112 if (err == -EMSGSIZE)
1113 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1114 return -1;
1115 }
1116
1117 return 0;
1118 }
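/*
 * Per RFC 2473, when the packet being encapsulated already carries a tunnel
 * encapsulation limit option, ip6ip6_tnl_xmit() above refuses to nest further
 * once that limit reaches zero (replying with an ICMPv6 parameter problem
 * that points at the option) and otherwise propagates limit - 1 into the
 * newly built outer destination options header.
 */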
1119
1120 static netdev_tx_t
1121 ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1122 {
1123 struct ip6_tnl *t = netdev_priv(dev);
1124 struct net_device_stats *stats = &t->dev->stats;
1125 int ret;
1126
1127 switch (skb->protocol) {
1128 case htons(ETH_P_IP):
1129 ret = ip4ip6_tnl_xmit(skb, dev);
1130 break;
1131 case htons(ETH_P_IPV6):
1132 ret = ip6ip6_tnl_xmit(skb, dev);
1133 break;
1134 default:
1135 goto tx_err;
1136 }
1137
1138 if (ret < 0)
1139 goto tx_err;
1140
1141 return NETDEV_TX_OK;
1142
1143 tx_err:
1144 stats->tx_errors++;
1145 stats->tx_dropped++;
1146 kfree_skb(skb);
1147 return NETDEV_TX_OK;
1148 }
1149
1150 static void ip6_tnl_link_config(struct ip6_tnl *t)
1151 {
1152 struct net_device *dev = t->dev;
1153 struct ip6_tnl_parm *p = &t->parms;
1154 struct flowi6 *fl6 = &t->fl.u.ip6;
1155
1156 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1157 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1158
1159 /* Set up flowi template */
1160 fl6->saddr = p->laddr;
1161 fl6->daddr = p->raddr;
1162 fl6->flowi6_oif = p->link;
1163 fl6->flowlabel = 0;
1164
1165 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1166 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1167 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1168 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1169
1170 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1171 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1172
1173 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1174 dev->flags |= IFF_POINTOPOINT;
1175 else
1176 dev->flags &= ~IFF_POINTOPOINT;
1177
1178 dev->iflink = p->link;
1179
1180 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1181 int strict = (ipv6_addr_type(&p->raddr) &
1182 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1183
1184 struct rt6_info *rt = rt6_lookup(dev_net(dev),
1185 &p->raddr, &p->laddr,
1186 p->link, strict);
1187
1188 if (rt == NULL)
1189 return;
1190
1191 if (rt->dst.dev) {
1192 dev->hard_header_len = rt->dst.dev->hard_header_len +
1193 sizeof (struct ipv6hdr);
1194
1195 dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
1196 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1197 dev->mtu-=8;
1198
1199 if (dev->mtu < IPV6_MIN_MTU)
1200 dev->mtu = IPV6_MIN_MTU;
1201 }
1202 dst_release(&rt->dst);
1203 }
1204 }
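/*
 * MTU bookkeeping example for the route lookup above: with an underlying
 * device MTU of 1500, the tunnel MTU becomes 1500 - 40 (outer IPv6 header)
 * = 1460, or 1452 when the 8-byte encapsulation limit option is in use,
 * and is never allowed to drop below IPV6_MIN_MTU (1280).
 */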
1205
1206 /**
1207 * ip6_tnl_change - update the tunnel parameters
1208 * @t: tunnel to be changed
1209 * @p: tunnel configuration parameters
1210 *
1211 * Description:
1212 * ip6_tnl_change() updates the tunnel parameters
1213 **/
1214
1215 static int
1216 ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
1217 {
1218 t->parms.laddr = p->laddr;
1219 t->parms.raddr = p->raddr;
1220 t->parms.flags = p->flags;
1221 t->parms.hop_limit = p->hop_limit;
1222 t->parms.encap_limit = p->encap_limit;
1223 t->parms.flowinfo = p->flowinfo;
1224 t->parms.link = p->link;
1225 t->parms.proto = p->proto;
1226 ip6_tnl_dst_reset(t);
1227 ip6_tnl_link_config(t);
1228 return 0;
1229 }
1230
1231 /**
1232 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1233 * @dev: virtual device associated with tunnel
1234 * @ifr: parameters passed from userspace
1235 * @cmd: command to be performed
1236 *
1237 * Description:
1238 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1239 * from userspace.
1240 *
1241 * The possible commands are the following:
1242 * %SIOCGETTUNNEL: get tunnel parameters for device
1243 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1244 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1245 * %SIOCDELTUNNEL: delete tunnel
1246 *
1247 * The fallback device "ip6tnl0", created during module
1248 * initialization, can be used for creating other tunnel devices.
1249 *
1250 * Return:
1251 * 0 on success,
1252 * %-EFAULT if unable to copy data to or from userspace,
1253 * %-EPERM if current process hasn't %CAP_NET_ADMIN set
1254 * %-EINVAL if passed tunnel parameters are invalid,
1255 * %-EEXIST if changing a tunnel's parameters would cause a conflict
1256 * %-ENODEV if attempting to change or delete a nonexisting device
1257 **/
1258
1259 static int
1260 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1261 {
1262 int err = 0;
1263 struct ip6_tnl_parm p;
1264 struct ip6_tnl *t = NULL;
1265 struct net *net = dev_net(dev);
1266 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1267
1268 switch (cmd) {
1269 case SIOCGETTUNNEL:
1270 if (dev == ip6n->fb_tnl_dev) {
1271 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
1272 err = -EFAULT;
1273 break;
1274 }
1275 t = ip6_tnl_locate(net, &p, 0);
1276 }
1277 if (t == NULL)
1278 t = netdev_priv(dev);
1279 memcpy(&p, &t->parms, sizeof (p));
1280 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
1281 err = -EFAULT;
1282 }
1283 break;
1284 case SIOCADDTUNNEL:
1285 case SIOCCHGTUNNEL:
1286 err = -EPERM;
1287 if (!capable(CAP_NET_ADMIN))
1288 break;
1289 err = -EFAULT;
1290 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1291 break;
1292 err = -EINVAL;
1293 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1294 p.proto != 0)
1295 break;
1296 t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
1297 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
1298 if (t != NULL) {
1299 if (t->dev != dev) {
1300 err = -EEXIST;
1301 break;
1302 }
1303 } else
1304 t = netdev_priv(dev);
1305
1306 ip6_tnl_unlink(ip6n, t);
1307 synchronize_net();
1308 err = ip6_tnl_change(t, &p);
1309 ip6_tnl_link(ip6n, t);
1310 netdev_state_change(dev);
1311 }
1312 if (t) {
1313 err = 0;
1314 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
1315 err = -EFAULT;
1316
1317 } else
1318 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1319 break;
1320 case SIOCDELTUNNEL:
1321 err = -EPERM;
1322 if (!capable(CAP_NET_ADMIN))
1323 break;
1324
1325 if (dev == ip6n->fb_tnl_dev) {
1326 err = -EFAULT;
1327 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1328 break;
1329 err = -ENOENT;
1330 if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
1331 break;
1332 err = -EPERM;
1333 if (t->dev == ip6n->fb_tnl_dev)
1334 break;
1335 dev = t->dev;
1336 }
1337 err = 0;
1338 unregister_netdevice(dev);
1339 break;
1340 default:
1341 err = -EINVAL;
1342 }
1343 return err;
1344 }
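/*
 * For reference, a minimal userspace sketch of driving SIOCADDTUNNEL
 * (roughly what iproute2's "ip -6 tunnel add" does; needs CAP_NET_ADMIN).
 * Device names and addresses below are illustrative only; requests are
 * sent through the fallback device "ip6tnl0":
 *
 *	struct ip6_tnl_parm p = { 0 };
 *	struct ifreq ifr = { 0 };
 *	int fd;
 *
 *	strcpy(p.name, "mytnl1");
 *	p.proto = IPPROTO_IPV6;
 *	p.hop_limit = 64;
 *	p.encap_limit = 4;
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *
 *	fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */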
1345
1346 /**
1347 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1348 * @dev: virtual device associated with tunnel
1349 * @new_mtu: the new mtu
1350 *
1351 * Return:
1352 * 0 on success,
1353 * %-EINVAL if mtu too small
1354 **/
1355
1356 static int
1357 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1358 {
1359 if (new_mtu < IPV6_MIN_MTU) {
1360 return -EINVAL;
1361 }
1362 dev->mtu = new_mtu;
1363 return 0;
1364 }
1365
1366
1367 static const struct net_device_ops ip6_tnl_netdev_ops = {
1368 .ndo_uninit = ip6_tnl_dev_uninit,
1369 .ndo_start_xmit = ip6_tnl_xmit,
1370 .ndo_do_ioctl = ip6_tnl_ioctl,
1371 .ndo_change_mtu = ip6_tnl_change_mtu,
1372 .ndo_get_stats = ip6_get_stats,
1373 };
1374
1375
1376 /**
1377 * ip6_tnl_dev_setup - setup virtual tunnel device
1378 * @dev: virtual device associated with tunnel
1379 *
1380 * Description:
1381 * Initialize function pointers and device parameters
1382 **/
1383
1384 static void ip6_tnl_dev_setup(struct net_device *dev)
1385 {
1386 struct ip6_tnl *t;
1387
1388 dev->netdev_ops = &ip6_tnl_netdev_ops;
1389 dev->destructor = ip6_dev_free;
1390
1391 dev->type = ARPHRD_TUNNEL6;
1392 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
1393 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
1394 t = netdev_priv(dev);
1395 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1396 dev->mtu-=8;
1397 dev->flags |= IFF_NOARP;
1398 dev->addr_len = sizeof(struct in6_addr);
1399 dev->features |= NETIF_F_NETNS_LOCAL;
1400 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1401 }
1402
1403
1404 /**
1405 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1406 * @dev: virtual device associated with tunnel
1407 **/
1408
1409 static inline int
1410 ip6_tnl_dev_init_gen(struct net_device *dev)
1411 {
1412 struct ip6_tnl *t = netdev_priv(dev);
1413
1414 t->dev = dev;
1415 dev->tstats = alloc_percpu(struct pcpu_tstats);
1416 if (!dev->tstats)
1417 return -ENOMEM;
1418 return 0;
1419 }
1420
1421 /**
1422 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1423 * @dev: virtual device associated with tunnel
1424 **/
1425
1426 static int ip6_tnl_dev_init(struct net_device *dev)
1427 {
1428 struct ip6_tnl *t = netdev_priv(dev);
1429 int err = ip6_tnl_dev_init_gen(dev);
1430
1431 if (err)
1432 return err;
1433 ip6_tnl_link_config(t);
1434 return 0;
1435 }
1436
1437 /**
1438 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1439 * @dev: fallback device
1440 *
1441 * Return: 0
1442 **/
1443
1444 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1445 {
1446 struct ip6_tnl *t = netdev_priv(dev);
1447 struct net *net = dev_net(dev);
1448 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1449 int err = ip6_tnl_dev_init_gen(dev);
1450
1451 if (err)
1452 return err;
1453
1454 t->parms.proto = IPPROTO_IPV6;
1455 dev_hold(dev);
1456
1457 ip6_tnl_link_config(t);
1458
1459 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1460 return 0;
1461 }
1462
1463 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1464 .handler = ip4ip6_rcv,
1465 .err_handler = ip4ip6_err,
1466 .priority = 1,
1467 };
1468
1469 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1470 .handler = ip6ip6_rcv,
1471 .err_handler = ip6ip6_err,
1472 .priority = 1,
1473 };
1474
1475 static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1476 {
1477 int h;
1478 struct ip6_tnl *t;
1479 LIST_HEAD(list);
1480
1481 for (h = 0; h < HASH_SIZE; h++) {
1482 t = rtnl_dereference(ip6n->tnls_r_l[h]);
1483 while (t != NULL) {
1484 unregister_netdevice_queue(t->dev, &list);
1485 t = rtnl_dereference(t->next);
1486 }
1487 }
1488
1489 t = rtnl_dereference(ip6n->tnls_wc[0]);
1490 unregister_netdevice_queue(t->dev, &list);
1491 unregister_netdevice_many(&list);
1492 }
1493
1494 static int __net_init ip6_tnl_init_net(struct net *net)
1495 {
1496 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1497 struct ip6_tnl *t = NULL;
1498 int err;
1499
1500 ip6n->tnls[0] = ip6n->tnls_wc;
1501 ip6n->tnls[1] = ip6n->tnls_r_l;
1502
1503 err = -ENOMEM;
1504 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
1505 ip6_tnl_dev_setup);
1506
1507 if (!ip6n->fb_tnl_dev)
1508 goto err_alloc_dev;
1509 dev_net_set(ip6n->fb_tnl_dev, net);
1510
1511 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
1512 if (err < 0)
1513 goto err_register;
1514
1515 err = register_netdev(ip6n->fb_tnl_dev);
1516 if (err < 0)
1517 goto err_register;
1518
1519 t = netdev_priv(ip6n->fb_tnl_dev);
1520
1521 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
1522 return 0;
1523
1524 err_register:
1525 ip6_dev_free(ip6n->fb_tnl_dev);
1526 err_alloc_dev:
1527 return err;
1528 }
1529
1530 static void __net_exit ip6_tnl_exit_net(struct net *net)
1531 {
1532 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1533
1534 rtnl_lock();
1535 ip6_tnl_destroy_tunnels(ip6n);
1536 rtnl_unlock();
1537 }
1538
1539 static struct pernet_operations ip6_tnl_net_ops = {
1540 .init = ip6_tnl_init_net,
1541 .exit = ip6_tnl_exit_net,
1542 .id = &ip6_tnl_net_id,
1543 .size = sizeof(struct ip6_tnl_net),
1544 };
1545
1546 /**
1547 * ip6_tunnel_init - register protocol and reserve needed resources
1548 *
1549 * Return: 0 on success
1550 **/
1551
1552 static int __init ip6_tunnel_init(void)
1553 {
1554 int err;
1555
1556 err = register_pernet_device(&ip6_tnl_net_ops);
1557 if (err < 0)
1558 goto out_pernet;
1559
1560 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1561 if (err < 0) {
1562 pr_err("%s: can't register ip4ip6\n", __func__);
1563 goto out_ip4ip6;
1564 }
1565
1566 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1567 if (err < 0) {
1568 pr_err("%s: can't register ip6ip6\n", __func__);
1569 goto out_ip6ip6;
1570 }
1571
1572 return 0;
1573
1574 out_ip6ip6:
1575 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1576 out_ip4ip6:
1577 unregister_pernet_device(&ip6_tnl_net_ops);
1578 out_pernet:
1579 return err;
1580 }
1581
1582 /**
1583 * ip6_tunnel_cleanup - free resources and unregister protocol
1584 **/
1585
1586 static void __exit ip6_tunnel_cleanup(void)
1587 {
1588 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
1589 pr_info("%s: can't deregister ip4ip6\n", __func__);
1590
1591 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1592 pr_info("%s: can't deregister ip6ip6\n", __func__);
1593
1594 unregister_pernet_device(&ip6_tnl_net_ops);
1595 }
1596
1597 module_init(ip6_tunnel_init);
1598 module_exit(ip6_tunnel_cleanup);