/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>

#include <asm/uaccess.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
 * for the netdevice fit in one page, which keeps the allocation reliable.
 * TODO: increase the limit. */
#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) so that it can serve as one transmit queue for the tuntap
 * device. The sock_fprog and tap_filter are kept in tun_struct since they
 * are used for filtering on the netdevice, not on a specific queue (at
 * least no per-queue requirement has come up so far).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct net *net;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	u16 queue_index;
	struct list_head next;
	struct tun_struct *detached;
};
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are restored from here when a file is attached to a
 * persistent device, to preserve the old behaviour of persistent devices.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6|NETIF_F_UFO)

	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t		lock;
	struct hlist_head	flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list	flow_gc_timer;
	unsigned long		ageing_time;
	unsigned int		numdisabled;
	struct list_head	disabled;
	void			*security;
	u32			flow_count;
};
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}
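/* tun_hashfn(): 0x3ff == TUN_NUM_FLOW_ENTRIES - 1, so the low 10 bits of the
 * rxhash simply pick one of the 1024 flow-table buckets. */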
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(e, n, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}
static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *h, *n;

		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *h, *n;

		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *h, *n;

		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a tiny chance of out-of-order delivery while a flow is
	 * switching queues; it is not worth optimizing for. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
/* We try to identify a flow through its rxhash first. The reason we do not
 * check the rxq number is that some NICs (e.g. the 82599) choose the rxq
 * based on the txq on which the last packet of the flow was sent. As the
 * userspace application moves between processors, we may therefore see a
 * different rxq number here. If we cannot compute an rxhash, we fall back
 * to the recorded rxq number.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = tun->numqueues;

	txq = skb_get_rxhash(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e)
			txq = e->queue_index;
		else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
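/* The multiply-and-shift in tun_select_queue() maps a 32-bit hash uniformly
 * onto [0, numqueues) by computing floor(txq * numqueues / 2^32); for
 * example, with numqueues == 4 a hash of 0x80000000 lands on queue 2. It
 * spreads flows the way a modulo would, without the cost of a divide. */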
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;
	struct net_device *dev;

	tun = rtnl_dereference(tfile->tun);

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);
		dev = tun->dev;

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			rcu_assign_pointer(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & TUN_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}

		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
				 &tfile->socket.flags));
		sk_release_kernel(&tfile->sk);
	}
}
static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		wake_up_all(&tfile->wq.wait);
		rcu_assign_pointer(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		wake_up_all(&tfile->wq.wait);
		rcu_assign_pointer(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		/* Drop read queue */
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & TUN_PERSIST)
		module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (tun->filter_attached == true) {
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (err)
			goto out;
	}
	tfile->queue_index = tun->numqueues;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached)
		tun_enable_queue(tfile);
	else
		sock_hold(&tfile->sk);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold an extra
	 * refcnt.
	 */

out:
	return err;
}
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static struct tun_struct *tun_get(struct file *file)
{
	return __tun_get(file->private_data);
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
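/* addr_hash_set()/addr_hash_test() take the top 6 bits of the Ethernet CRC of
 * an address and use them to index a 64-bit bitmap (mask[2] of u32). A set
 * bit means "some hashed multicast address maps here", so the test can return
 * false positives but never false negatives - the same scheme conventional
 * Ethernet MAC hash filters use. */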
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = kmalloc(alen, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
		err = -EFAULT;
		goto done;
	}

	/* The filter is updated without holding any locks, which is
	 * perfectly safe. We disable it first, so in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto done;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

done:
	kfree(addr);
	return err;
}
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (filter->count == 0)
		return 1;

	return run_filter(filter, skb);
}
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	/* Limit the number of packets queued by dividing txq length with the
	 * number of queues.
	 */
	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
			  >= dev->tx_queue_len / tun->numqueues)
		goto drop;

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time. */
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;

	/* Enqueue packet */
	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
				   POLLRDNORM | POLLRDBAND);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
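/* Note on the backlog check in tun_net_xmit(): each queue's sk_receive_queue
 * may hold at most tx_queue_len / numqueues packets, so the device-wide queue
 * length configured by the user is split evenly across the attached queues
 * (e.g. with four queues each one gets a quarter of dev->tx_queue_len). */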
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

#define MIN_MTU 68
#define MAX_MTU 65535

static int
tun_net_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * Since both of those are synchronous operations, we are guaranteed
	 * never to have pending data when we poll for it, so there is nothing
	 * to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};
static int tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));

	return 0;
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;

	case TUN_TAP_DEV:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;
	}
}
/* Character device part */

/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	struct sock *sk;
	unsigned int mask = 0;

	if (!tun)
		return POLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, &tfile->wq.wait, wait);

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     sock_writeable(sk)))
		mask |= POLLOUT | POLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = POLLERR;

	tun_put(tun);
	return mask;
}
/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
/* set skb frags from iovec, this can move to core network code for reuse */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
				  int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = skb_headlen(skb);
	int size, offset1 = 0;
	int i = 0;

	/* Skip over from offset */
	while (count && (offset >= from->iov_len)) {
		offset -= from->iov_len;
		++from;
		--count;
	}

	/* copy up to skb headlen */
	while (count && (copy > 0)) {
		size = min_t(unsigned int, copy, from->iov_len - offset);
		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
				   size))
			return -EFAULT;
		if (copy > size) {
			++from;
			--count;
			offset = 0;
		} else
			offset += size;
		copy -= size;
		offset1 += size;
	}

	if (len == offset1)
		return 0;

	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		len = from->iov_len - offset;
		if (!len) {
			offset = 0;
			++from;
			continue;
		}
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			for (i = 0; i < num_pages; i++)
				put_page(page[i]);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		/* increase sk_wmem_alloc */
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			__skb_fill_page_desc(skb, i, page[i], off, size);
			skb_shinfo(skb)->nr_frags++;
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, const struct iovec *iv,
			    size_t total_len, size_t count, int noblock)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = total_len, align = NET_SKB_PAD;
	struct virtio_net_hdr gso = { 0 };
	int offset = 0;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) > total_len)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
			return -EFAULT;
		offset += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		if ((len -= tun->vnet_hdr_sz) > total_len)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;

		if (gso.hdr_len > len)
			return -EINVAL;
		offset += tun->vnet_hdr_sz;
	}

	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
			return -EINVAL;
	}

	if (msg_control)
		zerocopy = true;

	if (zerocopy) {
		/* Userspace may produce vectors with count greater than
		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
		 * to let the rest of the data fit in the frags.
		 */
		if (count > MAX_SKB_FRAGS) {
			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
			if (copylen < offset)
				copylen = 0;
			else
				copylen -= offset;
		} else
			copylen = 0;
		/* There are 256 bytes to be copied in skb, so there is enough
		 * room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		if (copylen < gso.hdr_len)
			copylen = gso.hdr_len;
		if (!copylen)
			copylen = GOODCOPY_LEN;
	} else
		copylen = len;

	skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EAGAIN)
			tun->dev->stats.rx_dropped++;
		return PTR_ERR(skb);
	}

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
	else
		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);

	if (err) {
		tun->dev->stats.rx_dropped++;
		kfree_skb(skb);
		return -EFAULT;
	}

	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, gso.csum_start,
					  gso.csum_offset)) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		if (tun->flags & TUN_NO_PI) {
			switch (skb->data[0] & 0xf0) {
			case 0x40:
				pi.proto = htons(ETH_P_IP);
				break;
			case 0x60:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				tun->dev->stats.rx_dropped++;
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		default:
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = gso.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}

	skb_reset_network_header(skb);
	rxhash = skb_get_rxhash(skb);
	netif_rx_ni(skb);

	tun->dev->stats.rx_packets++;
	tun->dev->stats.rx_bytes += len;

	tun_flow_update(tun, rxhash, tfile);
	return total_len;
}
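/* Layout of a write into the tun fd, as parsed by tun_get_user() above: an
 * optional struct tun_pi (unless TUN_NO_PI), then an optional virtio_net_hdr
 * of tun->vnet_hdr_sz bytes (if TUN_VNET_HDR), then the packet itself. The
 * rxhash computed at the end feeds the flow table used by tun_select_queue(). */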
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_struct *tun = tun_get(file);
	struct tun_file *tfile = file->private_data;
	ssize_t result;

	if (!tun)
		return -EBADFD;

	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);

	result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
			      count, file->f_flags & O_NONBLOCK);

	tun_put(tun);
	return result;
}
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    const struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
			return -EFAULT;
		total += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 }; /* no info leak */
		if ((len -= tun->vnet_hdr_sz) < 0)
			return -EINVAL;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			gso.hdr_len = skb_headlen(skb);
			gso.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else {
				pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
				       sinfo->gso_type, gso.gso_size,
				       gso.hdr_len);
				print_hex_dump(KERN_ERR, "tun: ",
					       DUMP_PREFIX_NONE,
					       16, 1, skb->head,
					       min((int)gso.hdr_len, 64), true);
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			gso.csum_start = skb_checksum_start_offset(skb);
			gso.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
					       sizeof(gso))))
			return -EFAULT;
		total += tun->vnet_hdr_sz;
	}

	len = min_t(int, skb->len, len);

	skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
	total += len;

	tun->dev->stats.tx_packets++;
	tun->dev->stats.tx_bytes += len;

	return total;
}
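/* If the reader's buffer is smaller than the packet (len < skb->len),
 * tun_put_user() truncates the data to fit and sets TUN_PKT_STRIP in the
 * tun_pi flags so userspace can tell that the packet was cut short; the read
 * itself still succeeds. */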
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct kiocb *iocb, const struct iovec *iv,
			   ssize_t len, int noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t ret = 0;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (unlikely(!noblock))
		add_wait_queue(&tfile->wq.wait, &wait);
	while (len) {
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			if (tun->dev->reg_state != NETREG_REGISTERED) {
				ret = -EIO;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}

		ret = tun_put_user(tun, tfile, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	if (unlikely(!noblock))
		remove_wait_queue(&tfile->wq.wait, &wait);

	return ret;
}
static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t len, ret;

	if (!tun)
		return -EBADFD;
	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = tun_do_read(tun, tfile, iocb, iv, len,
			  file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
out:
	tun_put(tun);
	return ret;
}
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	free_netdev(dev);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return -EINVAL;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	int ret;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);

	if (!tun)
		return -EBADFD;
	ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
			   m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret;

	if (!tun)
		return -EBADFD;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		ret = -EINVAL;
		goto out;
	}
	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
			  flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;
}

static int tun_release(struct socket *sock)
{
	if (sock->sk)
		sock_put(sock->sk);
	return 0;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
	.release = tun_release,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};
static int tun_flags(struct tun_struct *tun)
{
	int flags = 0;

	if (tun->flags & TUN_TUN_DEV)
		flags |= IFF_TUN;
	else
		flags |= IFF_TAP;

	if (tun->flags & TUN_NO_PI)
		flags |= IFF_NO_PI;

	/* This flag has no real effect. We track the value for backwards
	 * compatibility.
	 */
	if (tun->flags & TUN_ONE_QUEUE)
		flags |= IFF_ONE_QUEUE;

	if (tun->flags & TUN_VNET_HDR)
		flags |= IFF_VNET_HDR;

	if (tun->flags & TUN_TAP_MQ)
		flags |= IFF_MULTI_QUEUE;

	return flags;
}
tun_show_flags(struct device
*dev
, struct device_attribute
*attr
,
1535 struct tun_struct
*tun
= netdev_priv(to_net_dev(dev
));
1536 return sprintf(buf
, "0x%x\n", tun_flags(tun
));
1539 static ssize_t
tun_show_owner(struct device
*dev
, struct device_attribute
*attr
,
1542 struct tun_struct
*tun
= netdev_priv(to_net_dev(dev
));
1543 return uid_valid(tun
->owner
)?
1544 sprintf(buf
, "%u\n",
1545 from_kuid_munged(current_user_ns(), tun
->owner
)):
1546 sprintf(buf
, "-1\n");
1549 static ssize_t
tun_show_group(struct device
*dev
, struct device_attribute
*attr
,
1552 struct tun_struct
*tun
= netdev_priv(to_net_dev(dev
));
1553 return gid_valid(tun
->group
) ?
1554 sprintf(buf
, "%u\n",
1555 from_kgid_munged(current_user_ns(), tun
->group
)):
1556 sprintf(buf
, "-1\n");
1559 static DEVICE_ATTR(tun_flags
, 0444, tun_show_flags
, NULL
);
1560 static DEVICE_ATTR(owner
, 0444, tun_show_owner
, NULL
);
1561 static DEVICE_ATTR(group
, 0444, tun_show_group
, NULL
);
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file);
		if (err < 0)
			return err;

		if (tun->flags & TUN_TAP_MQ &&
		    (tun->numqueues + tun->numdisabled > 1))
			return err;
	}
	else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       tun_setup, queues, queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_dev;

		tun_net_init(dev);

		err = tun_flow_init(tun);
		if (err < 0)
			goto err_free_dev;

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES;
		dev->features = dev->hw_features;

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file);
		if (err < 0)
			goto err_free_dev;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_free_dev;

		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
		    device_create_file(&tun->dev->dev, &dev_attr_group))
			pr_err("Failed to create tun sysfs files\n");
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;
	else
		tun->flags &= ~TUN_NO_PI;

	/* This flag has no real effect. We track the value for backwards
	 * compatibility.
	 */
	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;
	else
		tun->flags &= ~TUN_ONE_QUEUE;

	if (ifr->ifr_flags & IFF_VNET_HDR)
		tun->flags |= TUN_VNET_HDR;
	else
		tun->flags &= ~TUN_VNET_HDR;

	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
		tun->flags |= TUN_TAP_MQ;
	else
		tun->flags &= ~TUN_TAP_MQ;

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_free_dev:
	free_netdev(dev);
	return err;
}
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		if (arg & TUN_F_UFO) {
			features |= NETIF_F_UFO;
			arg &= ~TUN_F_UFO;
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	netdev_update_features(tun->dev);

	return 0;
}
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		sk_detach_filter(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}
*file
, struct ifreq
*ifr
)
1809 struct tun_file
*tfile
= file
->private_data
;
1810 struct tun_struct
*tun
;
1815 if (ifr
->ifr_flags
& IFF_ATTACH_QUEUE
) {
1816 tun
= tfile
->detached
;
1821 ret
= security_tun_dev_attach_queue(tun
->security
);
1824 ret
= tun_attach(tun
, file
);
1825 } else if (ifr
->ifr_flags
& IFF_DETACH_QUEUE
) {
1826 tun
= rtnl_dereference(tfile
->tun
);
1827 if (!tun
|| !(tun
->flags
& TUN_TAP_MQ
) || tfile
->detached
)
1830 __tun_detach(tfile
, false);
1839 static long __tun_chr_ioctl(struct file
*file
, unsigned int cmd
,
1840 unsigned long arg
, int ifreq_len
)
1842 struct tun_file
*tfile
= file
->private_data
;
1843 struct tun_struct
*tun
;
1844 void __user
* argp
= (void __user
*)arg
;
1852 if (cmd
== TUNSETIFF
|| cmd
== TUNSETQUEUE
|| _IOC_TYPE(cmd
) == 0x89) {
1853 if (copy_from_user(&ifr
, argp
, ifreq_len
))
1856 memset(&ifr
, 0, sizeof(ifr
));
1858 if (cmd
== TUNGETFEATURES
) {
1859 /* Currently this just means: "what IFF flags are valid?".
1860 * This is needed because we never checked for invalid flags on
1862 return put_user(IFF_TUN
| IFF_TAP
| IFF_NO_PI
| IFF_ONE_QUEUE
|
1863 IFF_VNET_HDR
| IFF_MULTI_QUEUE
,
1864 (unsigned int __user
*)argp
);
1865 } else if (cmd
== TUNSETQUEUE
)
1866 return tun_set_queue(file
, &ifr
);
1871 tun
= __tun_get(tfile
);
1872 if (cmd
== TUNSETIFF
&& !tun
) {
1873 ifr
.ifr_name
[IFNAMSIZ
-1] = '\0';
1875 ret
= tun_set_iff(tfile
->net
, file
, &ifr
);
1880 if (copy_to_user(argp
, &ifr
, ifreq_len
))
1889 tun_debug(KERN_INFO
, tun
, "tun_chr_ioctl cmd %u\n", cmd
);
1894 tun_get_iff(current
->nsproxy
->net_ns
, tun
, &ifr
);
1896 if (copy_to_user(argp
, &ifr
, ifreq_len
))
1901 /* Disable/Enable checksum */
1903 /* [unimplemented] */
1904 tun_debug(KERN_INFO
, tun
, "ignored: set checksum %s\n",
1905 arg
? "disabled" : "enabled");
1909 /* Disable/Enable persist mode. Keep an extra reference to the
1910 * module to prevent the module being unprobed.
1912 if (arg
&& !(tun
->flags
& TUN_PERSIST
)) {
1913 tun
->flags
|= TUN_PERSIST
;
1914 __module_get(THIS_MODULE
);
1916 if (!arg
&& (tun
->flags
& TUN_PERSIST
)) {
1917 tun
->flags
&= ~TUN_PERSIST
;
1918 module_put(THIS_MODULE
);
1921 tun_debug(KERN_INFO
, tun
, "persist %s\n",
1922 arg
? "enabled" : "disabled");
1926 /* Set owner of the device */
1927 owner
= make_kuid(current_user_ns(), arg
);
1928 if (!uid_valid(owner
)) {
1933 tun_debug(KERN_INFO
, tun
, "owner set to %u\n",
1934 from_kuid(&init_user_ns
, tun
->owner
));
1938 /* Set group of the device */
1939 group
= make_kgid(current_user_ns(), arg
);
1940 if (!gid_valid(group
)) {
1945 tun_debug(KERN_INFO
, tun
, "group set to %u\n",
1946 from_kgid(&init_user_ns
, tun
->group
));
1950 /* Only allow setting the type when the interface is down */
1951 if (tun
->dev
->flags
& IFF_UP
) {
1952 tun_debug(KERN_INFO
, tun
,
1953 "Linktype set failed because interface is up\n");
1956 tun
->dev
->type
= (int) arg
;
1957 tun_debug(KERN_INFO
, tun
, "linktype set to %d\n",
1969 ret
= set_offload(tun
, arg
);
1972 case TUNSETTXFILTER
:
1973 /* Can be set only for TAPs */
1975 if ((tun
->flags
& TUN_TYPE_MASK
) != TUN_TAP_DEV
)
1977 ret
= update_filter(&tun
->txflt
, (void __user
*)arg
);
1981 /* Get hw address */
1982 memcpy(ifr
.ifr_hwaddr
.sa_data
, tun
->dev
->dev_addr
, ETH_ALEN
);
1983 ifr
.ifr_hwaddr
.sa_family
= tun
->dev
->type
;
1984 if (copy_to_user(argp
, &ifr
, ifreq_len
))
1989 /* Set hw address */
1990 tun_debug(KERN_DEBUG
, tun
, "set hw address: %pM\n",
1991 ifr
.ifr_hwaddr
.sa_data
);
1993 ret
= dev_set_mac_address(tun
->dev
, &ifr
.ifr_hwaddr
);
1997 sndbuf
= tfile
->socket
.sk
->sk_sndbuf
;
1998 if (copy_to_user(argp
, &sndbuf
, sizeof(sndbuf
)))
2003 if (copy_from_user(&sndbuf
, argp
, sizeof(sndbuf
))) {
2008 tun
->sndbuf
= sndbuf
;
2009 tun_set_sndbuf(tun
);
2012 case TUNGETVNETHDRSZ
:
2013 vnet_hdr_sz
= tun
->vnet_hdr_sz
;
2014 if (copy_to_user(argp
, &vnet_hdr_sz
, sizeof(vnet_hdr_sz
)))
2018 case TUNSETVNETHDRSZ
:
2019 if (copy_from_user(&vnet_hdr_sz
, argp
, sizeof(vnet_hdr_sz
))) {
2023 if (vnet_hdr_sz
< (int)sizeof(struct virtio_net_hdr
)) {
2028 tun
->vnet_hdr_sz
= vnet_hdr_sz
;
2031 case TUNATTACHFILTER
:
2032 /* Can be set only for TAPs */
2034 if ((tun
->flags
& TUN_TYPE_MASK
) != TUN_TAP_DEV
)
2037 if (copy_from_user(&tun
->fprog
, argp
, sizeof(tun
->fprog
)))
2040 ret
= tun_attach_filter(tun
);
2043 case TUNDETACHFILTER
:
2044 /* Can be set only for TAPs */
2046 if ((tun
->flags
& TUN_TYPE_MASK
) != TUN_TAP_DEV
)
2049 tun_detach_filter(tun
, tun
->numqueues
);
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		if (ret)
			goto out;
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}
static int tun_chr_open(struct inode *inode, struct file * file)
{
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto);
	if (!tfile)
		return -ENOMEM;
	rcu_assign_pointer(tfile->tun, NULL);
	tfile->net = get_net(current->nsproxy->net_ns);
	tfile->flags = 0;

	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
	init_waitqueue_head(&tfile->wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);
	sk_change_net(&tfile->sk, tfile->net);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
	INIT_LIST_HEAD(&tfile->next);

	return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = tfile->net;

	tun_detach(tfile, true);
	put_net(net);

	return 0;
}
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read  = do_sync_read,
	.aio_read  = tun_chr_aio_read,
	.write = do_sync_write,
	.aio_write = tun_chr_aio_write,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
2189 static int tun_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
2192 cmd
->advertising
= 0;
2193 ethtool_cmd_speed_set(cmd
, SPEED_10
);
2194 cmd
->duplex
= DUPLEX_FULL
;
2195 cmd
->port
= PORT_TP
;
2196 cmd
->phy_address
= 0;
2197 cmd
->transceiver
= XCVR_INTERNAL
;
2198 cmd
->autoneg
= AUTONEG_DISABLE
;
2204 static void tun_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
2206 struct tun_struct
*tun
= netdev_priv(dev
);
2208 strlcpy(info
->driver
, DRV_NAME
, sizeof(info
->driver
));
2209 strlcpy(info
->version
, DRV_VERSION
, sizeof(info
->version
));
2211 switch (tun
->flags
& TUN_TYPE_MASK
) {
2213 strlcpy(info
->bus_info
, "tun", sizeof(info
->bus_info
));
2216 strlcpy(info
->bus_info
, "tap", sizeof(info
->bus_info
));
2221 static u32
tun_get_msglevel(struct net_device
*dev
)
2224 struct tun_struct
*tun
= netdev_priv(dev
);
2231 static void tun_set_msglevel(struct net_device
*dev
, u32 value
)
2234 struct tun_struct
*tun
= netdev_priv(dev
);
2239 static const struct ethtool_ops tun_ethtool_ops
= {
2240 .get_settings
= tun_get_settings
,
2241 .get_drvinfo
= tun_get_drvinfo
,
2242 .get_msglevel
= tun_get_msglevel
,
2243 .set_msglevel
= tun_set_msglevel
,
2244 .get_link
= ethtool_op_get_link
,
static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	pr_info("%s\n", DRV_COPYRIGHT);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}
	return 0;
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");