2 * TUN - Universal TUN/TAP device driver.
3 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
22 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
23 * Add TUNSETLINK ioctl to set the link encapsulation
25 * Mark Smith <markzzzsmith@yahoo.com.au>
26 * Use eth_random_addr() for tap MAC address.
28 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
29 * Fixes in packet dropping, queue length setting and queue wakeup.
30 * Increased default tx queue length.
34 * Daniel Podlejski <underley@underley.eu.org>
35 * Modifications for 2.3.99-pre5 kernel.
39 * KwnagHyun Kim <kh0304.kim@samsung.com> 2015/07/08
40 * Baesung Park <baesung.park@samsung.com> 2015/07/08
41 * Vignesh Saravanaperumal <vignesh1.s@samsung.com> 2015/07/08
42 * Add code to share UID/PID information
46 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48 #define DRV_NAME "tun"
49 #define DRV_VERSION "1.6"
50 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
51 #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
53 #include <linux/module.h>
54 #include <linux/errno.h>
55 #include <linux/kernel.h>
56 #include <linux/major.h>
57 #include <linux/slab.h>
58 #include <linux/poll.h>
59 #include <linux/fcntl.h>
60 #include <linux/init.h>
61 #include <linux/skbuff.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/miscdevice.h>
65 #include <linux/ethtool.h>
66 #include <linux/rtnetlink.h>
67 #include <linux/compat.h>
69 #include <linux/if_arp.h>
70 #include <linux/if_ether.h>
71 #include <linux/if_tun.h>
72 #include <linux/crc32.h>
73 #include <linux/nsproxy.h>
74 #include <linux/virtio_net.h>
75 #include <linux/rcupdate.h>
76 #include <net/net_namespace.h>
77 #include <net/netns/generic.h>
78 #include <net/rtnetlink.h>
80 // ------------- START of KNOX_VPN ------------------//
81 #include <linux/types.h>
82 #include <linux/udp.h>
83 #include <linux/tcp.h>
87 #define META_MARK_BASE_LOWER 100
88 #define META_MARK_BASE_UPPER 500
89 // ------------- END of KNOX_VPN -------------------//
91 #include <asm/uaccess.h>
93 /* Uncomment to enable debugging */
94 /* #define TUN_DEBUG 1 */
99 #define tun_debug(level, tun, fmt, args...) \
102 netdev_printk(level, tun->dev, fmt, ##args); \
104 #define DBG1(level, fmt, args...) \
107 printk(level fmt, ##args); \
110 #define tun_debug(level, tun, fmt, args...) \
113 netdev_printk(level, tun->dev, fmt, ##args); \
115 #define DBG1(level, fmt, args...) \
118 printk(level fmt, ##args); \
122 // ------------- START of KNOX_VPN ------------------//
123 /* The KNOX framework marks packets intended for a VPN client that need special processing.
124 * The marked packets hit special IP table rules and are routed back to user space through the TUN driver
125 * for policy-based treatment by the VPN client.
126 * Some VPN clients can make more intelligent decisions based on the UID/PID information.
127 * For such clients, we mark packets so that they fall in the range >= META_MARK_BASE_LOWER and < META_MARK_BASE_UPPER.
128 * When such packets are seen, we update the IP headers to carry UID/PID information
129 * in the IP options - all other packets are ignored.
130 * Also, see the comments above the individual steps taken in the code for details.
133 /* Metadata header structure */
135 struct knox_meta_param {
140 #define TUN_META_HDR_SZ sizeof(struct knox_meta_param)
141 #define TUN_META_MARK_OFFSET offsetof(struct knox_meta_param, uid)
142 // ------------- END of KNOX_VPN -------------------//
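/* Illustrative userspace sketch (assumption, not part of the driver): how a
 * VPN client that enabled the meta header might consume the prepended
 * UID/PID data on read(). The layout assumes IFF_NO_PI and no vnet header;
 * see tun_put_user()/knoxvpn_process_uidpid() below for the real ordering.
 * handle_packet() is a hypothetical helper.
 *
 *	struct knox_meta_param meta;
 *	char buf[4096];
 *	ssize_t n = read(tun_fd, buf, sizeof(buf));
 *
 *	if (n >= (ssize_t)sizeof(meta)) {
 *		memcpy(&meta, buf, sizeof(meta));	// UID/PID header
 *		handle_packet(buf + sizeof(meta),	// inner IP packet
 *			      n - sizeof(meta), meta.uid, meta.pid);
 *	}
 */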
144 #define GOODCOPY_LEN 128
146 #define FLT_EXACT_COUNT 8
148 unsigned int count;	/* Number of addrs. Zero means disabled */
149 u32 mask[2];		/* Mask of the hashed addrs */
150 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
153 /* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for
154 * the netdevice fit in one page, which guarantees that the memory
155 * allocation succeeds. TODO: increase the limit. */
156 #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
157 #define MAX_TAP_FLOWS 4096
159 #define TUN_FLOW_EXPIRE (3 * HZ)
161 /* A tun_file connects an open character device to a tuntap netdevice. It
162 * also contains all socket related structures (except sock_fprog and tap_filter)
163 * so that it can serve as one transmit queue for the tuntap device. The sock_fprog and
164 * tap_filter are kept in tun_struct since they are used for filtering on the
165 * netdevice, not on a specific queue (at least I didn't see the requirement for this).
169 * The tun_file and tun_struct are loosely coupled: the pointer from one to the
170 * other can only be read while rcu_read_lock or rtnl_lock is held.
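/* A minimal sketch (illustrative only) of the rule stated above: the
 * tfile->tun pointer is read either under rcu_read_lock(),
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);	// may be NULL if detached
 *	if (tun) {
 *		// use tun, or take a reference before unlocking
 *	}
 *	rcu_read_unlock();
 *
 * or under rtnl_lock(), in which case rtnl_dereference(tfile->tun) is used
 * (see __tun_get(), tun_attach() and __tun_detach() below).
 */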
174 struct socket socket;
176 struct tun_struct __rcu *tun;
178 struct fasync_struct *fasync;
179 /* only used for fasync */
182 struct list_head next;
183 struct tun_struct *detached;
186 struct tun_flow_entry {
187 struct hlist_node hash_link;
189 struct tun_struct *tun;
193 unsigned long updated;
198 /* Since the socket was moved to tun_file, to preserve the behavior of a persistent
199 * device, the socket filter, sndbuf and vnet header size are restored when a
200 * file is attached to a persistent device.
203 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
204 unsigned int numqueues;
209 struct net_device *dev;
210 netdev_features_t set_features;
211 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
212 NETIF_F_TSO6|NETIF_F_UFO)
216 struct tap_filter txflt;
217 struct sock_fprog fprog;
218 /* protected by rtnl lock */
219 bool filter_attached;
224 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
225 struct timer_list flow_gc_timer;
226 unsigned long ageing_time;
227 unsigned int numdisabled;
228 struct list_head disabled;
233 static inline u32 tun_hashfn(u32 rxhash)
235 return rxhash & 0x3ff;
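/* Worked example: the 0x3ff mask keeps the low 10 bits of the rxhash, so
 * every flow maps to one of TUN_NUM_FLOW_ENTRIES (1024) buckets, e.g.
 * tun_hashfn(0x12345678) == 0x278, i.e. bucket 632.
 */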
238 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
240 struct tun_flow_entry *e;
242 hlist_for_each_entry_rcu(e, head, hash_link) {
243 if (e->rxhash == rxhash)
249 static struct tun_flow_entry
*tun_flow_create(struct tun_struct
*tun
,
250 struct hlist_head
*head
,
251 u32 rxhash
, u16 queue_index
)
253 struct tun_flow_entry
*e
= kmalloc(sizeof(*e
), GFP_ATOMIC
);
256 tun_debug(KERN_INFO
, tun
, "create flow: hash %u index %u\n",
257 rxhash
, queue_index
);
258 e
->updated
= jiffies
;
260 e
->queue_index
= queue_index
;
262 hlist_add_head_rcu(&e
->hash_link
, head
);
268 static void tun_flow_delete(struct tun_struct
*tun
, struct tun_flow_entry
*e
)
270 tun_debug(KERN_INFO
, tun
, "delete flow: hash %u index %u\n",
271 e
->rxhash
, e
->queue_index
);
272 hlist_del_rcu(&e
->hash_link
);
277 static void tun_flow_flush(struct tun_struct
*tun
)
281 spin_lock_bh(&tun
->lock
);
282 for (i
= 0; i
< TUN_NUM_FLOW_ENTRIES
; i
++) {
283 struct tun_flow_entry
*e
;
284 struct hlist_node
*n
;
286 hlist_for_each_entry_safe(e
, n
, &tun
->flows
[i
], hash_link
)
287 tun_flow_delete(tun
, e
);
289 spin_unlock_bh(&tun
->lock
);
292 static void tun_flow_delete_by_queue(struct tun_struct
*tun
, u16 queue_index
)
296 spin_lock_bh(&tun
->lock
);
297 for (i
= 0; i
< TUN_NUM_FLOW_ENTRIES
; i
++) {
298 struct tun_flow_entry
*e
;
299 struct hlist_node
*n
;
301 hlist_for_each_entry_safe(e
, n
, &tun
->flows
[i
], hash_link
) {
302 if (e
->queue_index
== queue_index
)
303 tun_flow_delete(tun
, e
);
306 spin_unlock_bh(&tun
->lock
);
309 static void tun_flow_cleanup(unsigned long data
)
311 struct tun_struct
*tun
= (struct tun_struct
*)data
;
312 unsigned long delay
= tun
->ageing_time
;
313 unsigned long next_timer
= jiffies
+ delay
;
314 unsigned long count
= 0;
317 tun_debug(KERN_INFO
, tun
, "tun_flow_cleanup\n");
319 spin_lock_bh(&tun
->lock
);
320 for (i
= 0; i
< TUN_NUM_FLOW_ENTRIES
; i
++) {
321 struct tun_flow_entry
*e
;
322 struct hlist_node
*n
;
324 hlist_for_each_entry_safe(e
, n
, &tun
->flows
[i
], hash_link
) {
325 unsigned long this_timer
;
327 this_timer
= e
->updated
+ delay
;
328 if (time_before_eq(this_timer
, jiffies
))
329 tun_flow_delete(tun
, e
);
330 else if (time_before(this_timer
, next_timer
))
331 next_timer
= this_timer
;
336 mod_timer(&tun
->flow_gc_timer
, round_jiffies_up(next_timer
));
337 spin_unlock_bh(&tun
->lock
);
340 static void tun_flow_update(struct tun_struct
*tun
, u32 rxhash
,
341 struct tun_file
*tfile
)
343 struct hlist_head
*head
;
344 struct tun_flow_entry
*e
;
345 unsigned long delay
= tun
->ageing_time
;
346 u16 queue_index
= tfile
->queue_index
;
351 head
= &tun
->flows
[tun_hashfn(rxhash
)];
355 /* There is a very small possibility of out-of-order delivery during a
356 * queue switch; it is not worth optimizing for. */
357 if (tun
->numqueues
== 1 || tfile
->detached
)
360 e
= tun_flow_find(head
, rxhash
);
362 /* TODO: keep queueing to old queue until it's empty? */
363 e
->queue_index
= queue_index
;
364 e
->updated
= jiffies
;
366 spin_lock_bh(&tun
->lock
);
367 if (!tun_flow_find(head
, rxhash
) &&
368 tun
->flow_count
< MAX_TAP_FLOWS
)
369 tun_flow_create(tun
, head
, rxhash
, queue_index
);
371 if (!timer_pending(&tun
->flow_gc_timer
))
372 mod_timer(&tun
->flow_gc_timer
,
373 round_jiffies_up(jiffies
+ delay
));
374 spin_unlock_bh(&tun
->lock
);
381 /* We try to identify a flow through its rxhash first. The reason that
382 * we do not check the rxq no. is that some cards (e.g. 82599) choose
383 * the rxq based on the txq on which the last packet of the flow was sent. As
384 * the userspace application moves between processors, we may see a
385 * different rxq no. here. If we cannot get an rxhash, then we
386 * hope the rxq no. may help here.
388 static u16
tun_select_queue(struct net_device
*dev
, struct sk_buff
*skb
)
390 struct tun_struct
*tun
= netdev_priv(dev
);
391 struct tun_flow_entry
*e
;
396 numqueues
= ACCESS_ONCE(tun
->numqueues
);
398 txq
= skb_get_rxhash(skb
);
400 e
= tun_flow_find(&tun
->flows
[tun_hashfn(txq
)], txq
);
402 txq
= e
->queue_index
;
404 /* use multiply and shift instead of expensive divide */
405 txq = ((u64)txq * numqueues) >> 32;
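/* Worked example of the multiply-and-shift scaling above: the rxhash is a
 * 32-bit value, so ((u64)txq * numqueues) >> 32 maps it uniformly onto
 * [0, numqueues). E.g. with numqueues = 4 and txq = 0x80000000,
 * (0x80000000ULL * 4) >> 32 == 2.
 */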
406 } else if (likely(skb_rx_queue_recorded(skb
))) {
407 txq
= skb_get_rx_queue(skb
);
408 while (unlikely(txq
>= numqueues
))
416 static inline bool tun_not_capable(struct tun_struct
*tun
)
418 const struct cred
*cred
= current_cred();
419 struct net
*net
= dev_net(tun
->dev
);
421 return ((uid_valid(tun
->owner
) && !uid_eq(cred
->euid
, tun
->owner
)) ||
422 (gid_valid(tun
->group
) && !in_egroup_p(tun
->group
))) &&
423 !ns_capable(net
->user_ns
, CAP_NET_ADMIN
);
426 static void tun_set_real_num_queues(struct tun_struct
*tun
)
428 netif_set_real_num_tx_queues(tun
->dev
, tun
->numqueues
);
429 netif_set_real_num_rx_queues(tun
->dev
, tun
->numqueues
);
432 static void tun_disable_queue(struct tun_struct
*tun
, struct tun_file
*tfile
)
434 tfile
->detached
= tun
;
435 list_add_tail(&tfile
->next
, &tun
->disabled
);
439 static struct tun_struct
*tun_enable_queue(struct tun_file
*tfile
)
441 struct tun_struct
*tun
= tfile
->detached
;
443 tfile
->detached
= NULL
;
444 list_del_init(&tfile
->next
);
449 static void __tun_detach(struct tun_file
*tfile
, bool clean
)
451 struct tun_file
*ntfile
;
452 struct tun_struct
*tun
;
454 tun
= rtnl_dereference(tfile
->tun
);
456 if (tun
&& !tfile
->detached
) {
457 u16 index
= tfile
->queue_index
;
458 BUG_ON(index
>= tun
->numqueues
);
460 rcu_assign_pointer(tun
->tfiles
[index
],
461 tun
->tfiles
[tun
->numqueues
- 1]);
462 ntfile
= rtnl_dereference(tun
->tfiles
[index
]);
463 ntfile
->queue_index
= index
;
467 rcu_assign_pointer(tfile
->tun
, NULL
);
468 sock_put(&tfile
->sk
);
470 tun_disable_queue(tun
, tfile
);
473 tun_flow_delete_by_queue(tun
, tun
->numqueues
+ 1);
474 /* Drop read queue */
475 skb_queue_purge(&tfile
->sk
.sk_receive_queue
);
476 tun_set_real_num_queues(tun
);
477 } else if (tfile
->detached
&& clean
) {
478 tun
= tun_enable_queue(tfile
);
479 sock_put(&tfile
->sk
);
483 if (tun
&& tun
->numqueues
== 0 && tun
->numdisabled
== 0) {
484 netif_carrier_off(tun
->dev
);
486 if (!(tun
->flags
& TUN_PERSIST
) &&
487 tun
->dev
->reg_state
== NETREG_REGISTERED
)
488 unregister_netdevice(tun
->dev
);
491 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED
,
492 &tfile
->socket
.flags
));
493 sk_release_kernel(&tfile
->sk
);
497 static void tun_detach(struct tun_file
*tfile
, bool clean
)
500 __tun_detach(tfile
, clean
);
504 static void tun_detach_all(struct net_device
*dev
)
506 struct tun_struct
*tun
= netdev_priv(dev
);
507 struct tun_file
*tfile
, *tmp
;
508 int i
, n
= tun
->numqueues
;
510 for (i
= 0; i
< n
; i
++) {
511 tfile
= rtnl_dereference(tun
->tfiles
[i
]);
513 wake_up_all(&tfile
->wq
.wait
);
514 rcu_assign_pointer(tfile
->tun
, NULL
);
517 list_for_each_entry(tfile
, &tun
->disabled
, next
) {
518 wake_up_all(&tfile
->wq
.wait
);
519 rcu_assign_pointer(tfile
->tun
, NULL
);
521 BUG_ON(tun
->numqueues
!= 0);
524 for (i
= 0; i
< n
; i
++) {
525 tfile
= rtnl_dereference(tun
->tfiles
[i
]);
526 /* Drop read queue */
527 skb_queue_purge(&tfile
->sk
.sk_receive_queue
);
528 sock_put(&tfile
->sk
);
530 list_for_each_entry_safe(tfile
, tmp
, &tun
->disabled
, next
) {
531 tun_enable_queue(tfile
);
532 skb_queue_purge(&tfile
->sk
.sk_receive_queue
);
533 sock_put(&tfile
->sk
);
535 BUG_ON(tun
->numdisabled
!= 0);
537 if (tun
->flags
& TUN_PERSIST
)
538 module_put(THIS_MODULE
);
541 static int tun_attach(struct tun_struct
*tun
, struct file
*file
)
543 struct tun_file
*tfile
= file
->private_data
;
546 err
= security_tun_dev_attach(tfile
->socket
.sk
, tun
->security
);
551 if (rtnl_dereference(tfile
->tun
) && !tfile
->detached
)
555 if (!(tun
->flags
& TUN_TAP_MQ
) && tun
->numqueues
== 1)
559 if (!tfile
->detached
&&
560 tun
->numqueues
+ tun
->numdisabled
== MAX_TAP_QUEUES
)
565 /* Re-attach the filter to the persistent device */
566 if (tun
->filter_attached
== true) {
567 err
= sk_attach_filter(&tun
->fprog
, tfile
->socket
.sk
);
571 tfile
->queue_index
= tun
->numqueues
;
572 rcu_assign_pointer(tfile
->tun
, tun
);
573 rcu_assign_pointer(tun
->tfiles
[tun
->numqueues
], tfile
);
577 tun_enable_queue(tfile
);
579 sock_hold(&tfile
->sk
);
581 tun_set_real_num_queues(tun
);
583 /* device is allowed to go away first, so no need to hold an extra refcnt. */
591 static struct tun_struct
*__tun_get(struct tun_file
*tfile
)
593 struct tun_struct
*tun
;
596 tun
= rcu_dereference(tfile
->tun
);
604 static struct tun_struct
*tun_get(struct file
*file
)
606 return __tun_get(file
->private_data
);
609 static void tun_put(struct tun_struct
*tun
)
615 static void addr_hash_set(u32 *mask, const u8 *addr)
617 int n = ether_crc(ETH_ALEN, addr) >> 26;
618 mask[n >> 5] |= (1 << (n & 31));
621 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
623 int n = ether_crc(ETH_ALEN, addr) >> 26;
624 return mask[n >> 5] & (1 << (n & 31));
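/* Worked example of the hash-filter indexing above: ether_crc() yields a
 * 32-bit CRC and ">> 26" keeps its top 6 bits, so n is in [0, 63].
 * n >> 5 selects one of the two u32 words in mask[], and 1 << (n & 31)
 * selects the bit within that word - e.g. n = 40 sets bit 8 of mask[1].
 */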
627 static int update_filter(struct tap_filter
*filter
, void __user
*arg
)
629 struct { u8 u
[ETH_ALEN
]; } *addr
;
630 struct tun_filter uf
;
631 int err
, alen
, n
, nexact
;
633 if (copy_from_user(&uf
, arg
, sizeof(uf
)))
642 alen
= ETH_ALEN
* uf
.count
;
643 addr
= kmalloc(alen
, GFP_KERNEL
);
647 if (copy_from_user(addr
, arg
+ sizeof(uf
), alen
)) {
652 /* The filter is updated without holding any locks, which is
653 * perfectly safe: we disable it first, and in the worst
654 * case we'll accept a few undesired packets. */
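/* Illustrative userspace sketch (assumption, not part of the driver): the
 * buffer parsed by update_filter() is a struct tun_filter immediately
 * followed by uf.count MAC addresses. The first (unicast) address below
 * lands in the exact filter, the second (multicast) one is hashed.
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = {
 *		.uf    = { .flags = 0, .count = 2 },
 *		.addrs = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *			   { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	ioctl(tap_fd, TUNSETTXFILTER, &req);
 */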
658 /* Use first set of addresses as an exact filter */
659 for (n
= 0; n
< uf
.count
&& n
< FLT_EXACT_COUNT
; n
++)
660 memcpy(filter
->addr
[n
], addr
[n
].u
, ETH_ALEN
);
664 /* Remaining multicast addresses are hashed;
665 * a unicast address will leave the filter disabled. */
666 memset(filter
->mask
, 0, sizeof(filter
->mask
));
667 for (; n
< uf
.count
; n
++) {
668 if (!is_multicast_ether_addr(addr
[n
].u
)) {
669 err
= 0; /* no filter */
672 addr_hash_set(filter
->mask
, addr
[n
].u
);
675 /* For ALLMULTI just set the mask to all ones.
676 * This overrides the mask populated above. */
677 if ((uf
.flags
& TUN_FLT_ALLMULTI
))
678 memset(filter
->mask
, ~0, sizeof(filter
->mask
));
680 /* Now enable the filter */
682 filter
->count
= nexact
;
684 /* Return the number of exact filters */
692 /* Returns: 0 - drop, !=0 - accept */
693 static int run_filter(struct tap_filter
*filter
, const struct sk_buff
*skb
)
695 /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
697 struct ethhdr
*eh
= (struct ethhdr
*) skb
->data
;
701 for (i
= 0; i
< filter
->count
; i
++)
702 if (ether_addr_equal(eh
->h_dest
, filter
->addr
[i
]))
705 /* Inexact match (multicast only) */
706 if (is_multicast_ether_addr(eh
->h_dest
))
707 return addr_hash_test(filter
->mask
, eh
->h_dest
);
713 * Checks whether the packet is accepted or not.
714 * Returns: 0 - drop, !=0 - accept
716 static int check_filter(struct tap_filter
*filter
, const struct sk_buff
*skb
)
721 return run_filter(filter
, skb
);
724 /* Network device part of the driver */
726 static const struct ethtool_ops tun_ethtool_ops
;
728 /* Net device detach from fd. */
729 static void tun_net_uninit(struct net_device
*dev
)
734 /* Net device open. */
735 static int tun_net_open(struct net_device
*dev
)
737 netif_tx_start_all_queues(dev
);
741 /* Net device close. */
742 static int tun_net_close(struct net_device
*dev
)
744 netif_tx_stop_all_queues(dev
);
748 /* Net device start xmit */
749 static netdev_tx_t
tun_net_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
751 struct tun_struct
*tun
= netdev_priv(dev
);
752 int txq
= skb
->queue_mapping
;
753 struct tun_file
*tfile
;
756 tfile
= rcu_dereference(tun
->tfiles
[txq
]);
758 /* Drop packet if interface is not attached */
759 if (txq
>= tun
->numqueues
)
762 tun_debug(KERN_INFO
, tun
, "tun_net_xmit %d\n", skb
->len
);
766 /* Drop if the filter does not like it.
767 * This is a noop if the filter is disabled.
768 * Filter can be enabled only for the TAP devices. */
769 if (!check_filter(&tun
->txflt
, skb
))
772 if (tfile
->socket
.sk
->sk_filter
&&
773 sk_filter(tfile
->socket
.sk
, skb
))
776 /* Limit the number of packets queued by dividing the txq length by the number of queues. */
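/* Worked example: with a tx_queue_len of 500 and 4 attached queues, each
 * tun_file's receive queue may buffer at most 125 packets before further
 * packets are dropped.
 */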
779 if (skb_queue_len(&tfile
->socket
.sk
->sk_receive_queue
)
780 >= dev
->tx_queue_len
/ tun
->numqueues
)
783 /* Orphan the skb - required as we might hang on to it
784 * for an indefinite time. */
785 if (unlikely(skb_orphan_frags(skb
, GFP_ATOMIC
)))
792 skb_queue_tail(&tfile
->socket
.sk
->sk_receive_queue
, skb
);
794 /* Notify and wake up reader process */
795 if (tfile
->flags
& TUN_FASYNC
)
796 kill_fasync(&tfile
->fasync
, SIGIO
, POLL_IN
);
797 wake_up_interruptible_poll(&tfile
->wq
.wait
, POLLIN
|
798 POLLRDNORM
| POLLRDBAND
);
804 dev
->stats
.tx_dropped
++;
811 static void tun_net_mclist(struct net_device
*dev
)
814 * This callback is supposed to deal with mc filter in
815 * _rx_ path and has nothing to do with the _tx_ path.
816 * In rx path we always accept everything userspace gives us.
821 #define MAX_MTU 65535
824 tun_net_change_mtu(struct net_device
*dev
, int new_mtu
)
826 if (new_mtu
< MIN_MTU
|| new_mtu
+ dev
->hard_header_len
> MAX_MTU
)
832 static netdev_features_t
tun_net_fix_features(struct net_device
*dev
,
833 netdev_features_t features
)
835 struct tun_struct
*tun
= netdev_priv(dev
);
837 return (features
& tun
->set_features
) | (features
& ~TUN_USER_FEATURES
);
839 #ifdef CONFIG_NET_POLL_CONTROLLER
840 static void tun_poll_controller(struct net_device
*dev
)
843 * Tun only receives frames when:
844 * 1) the char device endpoint gets data from user space
845 * 2) the tun socket gets a sendmsg call from user space
846 * Since both of those are synchronous operations, we are guaranteed
847 * never to have pending data when we poll for it,
848 * so there's nothing to do here but return.
849 * We need this though so netpoll recognizes us as an interface that
850 * supports polling, which enables bridge devices in virt setups to
851 * still use netconsole.
856 static const struct net_device_ops tun_netdev_ops
= {
857 .ndo_uninit
= tun_net_uninit
,
858 .ndo_open
= tun_net_open
,
859 .ndo_stop
= tun_net_close
,
860 .ndo_start_xmit
= tun_net_xmit
,
861 .ndo_change_mtu
= tun_net_change_mtu
,
862 .ndo_fix_features
= tun_net_fix_features
,
863 .ndo_select_queue
= tun_select_queue
,
864 #ifdef CONFIG_NET_POLL_CONTROLLER
865 .ndo_poll_controller
= tun_poll_controller
,
869 static const struct net_device_ops tap_netdev_ops
= {
870 .ndo_uninit
= tun_net_uninit
,
871 .ndo_open
= tun_net_open
,
872 .ndo_stop
= tun_net_close
,
873 .ndo_start_xmit
= tun_net_xmit
,
874 .ndo_change_mtu
= tun_net_change_mtu
,
875 .ndo_fix_features
= tun_net_fix_features
,
876 .ndo_set_rx_mode
= tun_net_mclist
,
877 .ndo_set_mac_address
= eth_mac_addr
,
878 .ndo_validate_addr
= eth_validate_addr
,
879 .ndo_select_queue
= tun_select_queue
,
880 #ifdef CONFIG_NET_POLL_CONTROLLER
881 .ndo_poll_controller
= tun_poll_controller
,
885 static int tun_flow_init(struct tun_struct
*tun
)
889 for (i
= 0; i
< TUN_NUM_FLOW_ENTRIES
; i
++)
890 INIT_HLIST_HEAD(&tun
->flows
[i
]);
892 tun
->ageing_time
= TUN_FLOW_EXPIRE
;
893 setup_timer(&tun
->flow_gc_timer
, tun_flow_cleanup
, (unsigned long)tun
);
894 mod_timer(&tun
->flow_gc_timer
,
895 round_jiffies_up(jiffies
+ tun
->ageing_time
));
900 static void tun_flow_uninit(struct tun_struct
*tun
)
902 del_timer_sync(&tun
->flow_gc_timer
);
906 /* Initialize net device. */
907 static void tun_net_init(struct net_device
*dev
)
909 struct tun_struct
*tun
= netdev_priv(dev
);
911 switch (tun
->flags
& TUN_TYPE_MASK
) {
913 dev
->netdev_ops
= &tun_netdev_ops
;
915 /* Point-to-Point TUN Device */
916 dev
->hard_header_len
= 0;
920 /* Zero header length */
921 dev
->type
= ARPHRD_NONE
;
922 dev
->flags
= IFF_POINTOPOINT
| IFF_NOARP
| IFF_MULTICAST
;
923 dev
->tx_queue_len
= TUN_READQ_SIZE
; /* We prefer our own queue length */
927 dev
->netdev_ops
= &tap_netdev_ops
;
928 /* Ethernet TAP Device */
930 dev
->priv_flags
&= ~IFF_TX_SKB_SHARING
;
931 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
933 eth_hw_addr_random(dev
);
935 dev
->tx_queue_len
= TUN_READQ_SIZE
; /* We prefer our own queue length */
940 /* Character device part */
943 static unsigned int tun_chr_poll(struct file
*file
, poll_table
*wait
)
945 struct tun_file
*tfile
= file
->private_data
;
946 struct tun_struct
*tun
= __tun_get(tfile
);
948 unsigned int mask
= 0;
953 sk
= tfile
->socket
.sk
;
955 tun_debug(KERN_INFO
, tun
, "tun_chr_poll\n");
957 poll_wait(file
, &tfile
->wq
.wait
, wait
);
959 if (!skb_queue_empty(&sk
->sk_receive_queue
))
960 mask
|= POLLIN
| POLLRDNORM
;
962 if (sock_writeable(sk
) ||
963 (!test_and_set_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
) &&
965 mask
|= POLLOUT
| POLLWRNORM
;
967 if (tun
->dev
->reg_state
!= NETREG_REGISTERED
)
974 /* prepad is the amount to reserve at the front. len is the length after that.
975 * linear is a hint as to how much to copy (usually headers). */
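/* Minimal sketch with assumed numbers: a call such as
 *
 *	skb = tun_alloc_skb(tfile, NET_SKB_PAD, 16384, 128, noblock);
 *
 * yields an skb with 128 bytes of linear data and the remaining 16256 bytes
 * as paged data, while a small request (prepad + len < PAGE_SIZE) is made
 * entirely linear instead.
 */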
976 static struct sk_buff
*tun_alloc_skb(struct tun_file
*tfile
,
977 size_t prepad
, size_t len
,
978 size_t linear
, int noblock
)
980 struct sock
*sk
= tfile
->socket
.sk
;
984 /* Under a page? Don't bother with paged skb. */
985 if (prepad
+ len
< PAGE_SIZE
|| !linear
)
988 skb
= sock_alloc_send_pskb(sk
, prepad
+ linear
, len
- linear
, noblock
,
993 skb_reserve(skb
, prepad
);
994 skb_put(skb
, linear
);
995 skb
->data_len
= len
- linear
;
996 skb
->len
+= len
- linear
;
1001 /* set skb frags from iovec, this can move to core network code for reuse */
1002 static int zerocopy_sg_from_iovec(struct sk_buff
*skb
, const struct iovec
*from
,
1003 int offset
, size_t count
)
1005 int len
= iov_length(from
, count
) - offset
;
1006 int copy
= skb_headlen(skb
);
1007 int size
, offset1
= 0;
1010 /* Skip over from offset */
1011 while (count
&& (offset
>= from
->iov_len
)) {
1012 offset
-= from
->iov_len
;
1017 /* copy up to skb headlen */
1018 while (count
&& (copy
> 0)) {
1019 size
= min_t(unsigned int, copy
, from
->iov_len
- offset
);
1020 if (copy_from_user(skb
->data
+ offset1
, from
->iov_base
+ offset
,
1037 struct page
*page
[MAX_SKB_FRAGS
];
1040 unsigned long truesize
;
1042 len
= from
->iov_len
- offset
;
1048 base
= (unsigned long)from
->iov_base
+ offset
;
1049 size
= ((base
& ~PAGE_MASK
) + len
+ ~PAGE_MASK
) >> PAGE_SHIFT
;
1050 if (i
+ size
> MAX_SKB_FRAGS
)
1052 num_pages
= get_user_pages_fast(base
, size
, 0, &page
[i
]);
1053 if (num_pages
!= size
) {
1056 for (j
= 0; j
< num_pages
; j
++)
1057 put_page(page
[i
+ j
]);
1060 truesize
= size
* PAGE_SIZE
;
1061 skb
->data_len
+= len
;
1063 skb
->truesize
+= truesize
;
1064 atomic_add(truesize
, &skb
->sk
->sk_wmem_alloc
);
1066 int off
= base
& ~PAGE_MASK
;
1067 int size
= min_t(int, len
, PAGE_SIZE
- off
);
1068 __skb_fill_page_desc(skb
, i
, page
[i
], off
, size
);
1069 skb_shinfo(skb
)->nr_frags
++;
1070 /* increase sk_wmem_alloc */
1081 static unsigned long iov_pages(const struct iovec
*iv
, int offset
,
1082 unsigned long nr_segs
)
1084 unsigned long seg
, base
;
1085 int pages
= 0, len
, size
;
1087 while (nr_segs
&& (offset
>= iv
->iov_len
)) {
1088 offset
-= iv
->iov_len
;
1093 for (seg
= 0; seg
< nr_segs
; seg
++) {
1094 base
= (unsigned long)iv
[seg
].iov_base
+ offset
;
1095 len
= iv
[seg
].iov_len
- offset
;
1096 size
= ((base
& ~PAGE_MASK
) + len
+ ~PAGE_MASK
) >> PAGE_SHIFT
;
1104 /* Get packet from user space buffer */
1105 static ssize_t
tun_get_user(struct tun_struct
*tun
, struct tun_file
*tfile
,
1106 void *msg_control
, const struct iovec
*iv
,
1107 size_t total_len
, size_t count
, int noblock
)
1109 struct tun_pi pi
= { 0, cpu_to_be16(ETH_P_IP
) };
1110 struct sk_buff
*skb
;
1111 size_t len
= total_len
, align
= NET_SKB_PAD
, linear
;
1112 struct virtio_net_hdr gso
= { 0 };
1116 bool zerocopy
= false;
1120 if (!(tun
->flags
& TUN_NO_PI
)) {
1121 if (len
< sizeof(pi
))
1125 if (memcpy_fromiovecend((void *)&pi
, iv
, 0, sizeof(pi
)))
1127 offset
+= sizeof(pi
);
1130 if (tun
->flags
& TUN_VNET_HDR
) {
1131 int vnet_hdr_sz
= ACCESS_ONCE(tun
->vnet_hdr_sz
);
1133 if (len
< vnet_hdr_sz
)
1137 if (memcpy_fromiovecend((void *)&gso
, iv
, offset
, sizeof(gso
)))
1140 if ((gso
.flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) &&
1141 gso
.csum_start
+ gso
.csum_offset
+ 2 > gso
.hdr_len
)
1142 gso
.hdr_len
= gso
.csum_start
+ gso
.csum_offset
+ 2;
1144 if (gso
.hdr_len
> len
)
1146 offset
+= vnet_hdr_sz
;
1149 if ((tun
->flags
& TUN_TYPE_MASK
) == TUN_TAP_DEV
) {
1150 align
+= NET_IP_ALIGN
;
1151 if (unlikely(len
< ETH_HLEN
||
1152 (gso
.hdr_len
&& gso
.hdr_len
< ETH_HLEN
)))
1156 good_linear
= SKB_MAX_HEAD(align
);
1159 /* There are 256 bytes to be copied in skb, so there is
1160 * enough room to expand the skb head in case it is needed.
1161 * The rest of the buffer is mapped from userspace.
1163 copylen
= gso
.hdr_len
? gso
.hdr_len
: GOODCOPY_LEN
;
1164 if (copylen
> good_linear
)
1165 copylen
= good_linear
;
1167 if (iov_pages(iv
, offset
+ copylen
, count
) <= MAX_SKB_FRAGS
)
1173 if (gso
.hdr_len
> good_linear
)
1174 linear
= good_linear
;
1176 linear
= gso
.hdr_len
;
1179 skb
= tun_alloc_skb(tfile
, align
, copylen
, linear
, noblock
);
1181 if (PTR_ERR(skb
) != -EAGAIN
)
1182 tun
->dev
->stats
.rx_dropped
++;
1183 return PTR_ERR(skb
);
1187 err
= zerocopy_sg_from_iovec(skb
, iv
, offset
, count
);
1189 err
= skb_copy_datagram_from_iovec(skb
, 0, iv
, offset
, len
);
1190 if (!err
&& msg_control
) {
1191 struct ubuf_info
*uarg
= msg_control
;
1192 uarg
->callback(uarg
, false);
1197 tun
->dev
->stats
.rx_dropped
++;
1202 if (gso
.flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) {
1203 if (!skb_partial_csum_set(skb
, gso
.csum_start
,
1205 tun
->dev
->stats
.rx_frame_errors
++;
1211 switch (tun
->flags
& TUN_TYPE_MASK
) {
1213 if (tun
->flags
& TUN_NO_PI
) {
1214 switch (skb
->data
[0] & 0xf0) {
1216 pi
.proto
= htons(ETH_P_IP
);
1219 pi
.proto
= htons(ETH_P_IPV6
);
1222 tun
->dev
->stats
.rx_dropped
++;
1228 skb_reset_mac_header(skb
);
1229 skb
->protocol
= pi
.proto
;
1230 skb
->dev
= tun
->dev
;
1233 skb
->protocol
= eth_type_trans(skb
, tun
->dev
);
1237 if (gso
.gso_type
!= VIRTIO_NET_HDR_GSO_NONE
) {
1239 switch (gso
.gso_type
& ~VIRTIO_NET_HDR_GSO_ECN
) {
1240 case VIRTIO_NET_HDR_GSO_TCPV4
:
1241 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
1243 case VIRTIO_NET_HDR_GSO_TCPV6
:
1244 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
1246 case VIRTIO_NET_HDR_GSO_UDP
:
1247 skb_shinfo(skb
)->gso_type
= SKB_GSO_UDP
;
1250 tun
->dev
->stats
.rx_frame_errors
++;
1255 if (gso
.gso_type
& VIRTIO_NET_HDR_GSO_ECN
)
1256 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_ECN
;
1258 skb_shinfo(skb
)->gso_size
= gso
.gso_size
;
1259 if (skb_shinfo(skb
)->gso_size
== 0) {
1260 tun
->dev
->stats
.rx_frame_errors
++;
1265 /* Header must be checked, and gso_segs computed. */
1266 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
1267 skb_shinfo(skb
)->gso_segs
= 0;
1270 /* copy skb_ubuf_info for callback when skb has no error */
1272 skb_shinfo(skb
)->destructor_arg
= msg_control
;
1273 skb_shinfo(skb
)->tx_flags
|= SKBTX_DEV_ZEROCOPY
;
1274 skb_shinfo(skb
)->tx_flags
|= SKBTX_SHARED_FRAG
;
1277 skb_reset_network_header(skb
);
1278 skb_probe_transport_header(skb
, 0);
1280 rxhash
= skb_get_rxhash(skb
);
1283 tun
->dev
->stats
.rx_packets
++;
1284 tun
->dev
->stats
.rx_bytes
+= len
;
1286 tun_flow_update(tun
, rxhash
, tfile
);
1290 static ssize_t
tun_chr_aio_write(struct kiocb
*iocb
, const struct iovec
*iv
,
1291 unsigned long count
, loff_t pos
)
1293 struct file
*file
= iocb
->ki_filp
;
1294 struct tun_struct
*tun
= tun_get(file
);
1295 struct tun_file
*tfile
= file
->private_data
;
1301 tun_debug(KERN_INFO
, tun
, "tun_chr_write %ld\n", count
);
1303 result
= tun_get_user(tun
, tfile
, NULL
, iv
, iov_length(iv
, count
),
1304 count
, file
->f_flags
& O_NONBLOCK
);
1310 // ------------- START of KNOX_VPN ------------------//
1312 /* KNOX VPN packets have extra bytes because they carry meta information by default.
1313 * Such packets have sizeof(struct tun_meta_header) extra bytes in the IP options.
1314 * This is automatically reflected in the IP header length (IHL).
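/* Illustrative layout (derived from tun_put_user() below, assuming IFF_NO_PI):
 * with TUN_META_HDR set, a single read() from the tun fd returns
 *
 *	[struct virtio_net_hdr]		- only if TUN_VNET_HDR is set
 *	[struct knox_meta_param]	- UID/PID, TUN_META_HDR_SZ bytes
 *	[packet data]
 *
 * knoxvpn_process_uidpid() copies the knox_meta_param portion and advances
 * *total by TUN_META_HDR_SZ before the packet payload is copied out.
 */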
1316 static int knoxvpn_process_uidpid(struct tun_struct
*tun
, struct sk_buff
*skb
,
1317 const struct iovec
*iv
, int *len
, ssize_t
* total
)
1319 struct skb_shared_info
*knox_shinfo
= NULL
;
1320 struct knox_meta_param metalocal
= { 0, 0 };
1323 knox_shinfo
= skb_shinfo(skb
);
1326 pr_err("KNOX: NULL SKB in knoxvpn_process_uidpid");
1331 if (knox_shinfo
== NULL
) {
1333 pr_err("KNOX: knox_shinfo value is null");
1338 if (knox_shinfo
->knox_mark
>= META_MARK_BASE_LOWER
&& knox_shinfo
->knox_mark
<= META_MARK_BASE_UPPER
) {
1339 metalocal
.uid
= knox_shinfo
->uid
;
1340 metalocal
.pid
= knox_shinfo
->pid
;
1343 if (knox_shinfo
!= NULL
) {
1344 knox_shinfo
->uid
= knox_shinfo
->pid
= 0;
1345 knox_shinfo
->knox_mark
= 0;
1348 if (tun
->flags
& TUN_META_HDR
) {
1350 pr_err("KNOX: Appending uid: %d and pid: %d", metalocal
.uid
,
1355 (iv
, (void *)&metalocal
, (*total
),
1356 sizeof(struct knox_meta_param
)))) {
1358 pr_err("KNOX: Failed to copy buffer to userspace");
1362 (*total
) += TUN_META_HDR_SZ
;
1369 // ------------- END of KNOX_VPN ------------------//
1371 /* Put packet to the user space buffer */
1372 static ssize_t
tun_put_user(struct tun_struct
*tun
,
1373 struct tun_file
*tfile
,
1374 struct sk_buff
*skb
,
1375 const struct iovec
*iv
, int len
)
1377 struct tun_pi pi
= { 0, skb
->protocol
};
1379 int vnet_hdr_sz
= 0;
1381 if (tun
->flags
& TUN_VNET_HDR
)
1382 vnet_hdr_sz
= ACCESS_ONCE(tun
->vnet_hdr_sz
);
1384 if (!(tun
->flags
& TUN_NO_PI
)) {
1385 if ((len
-= sizeof(pi
)) < 0)
1388 if (len
< skb
->len
+ vnet_hdr_sz
) {
1389 /* Packet will be stripped */
1390 pi
.flags
|= TUN_PKT_STRIP
;
1393 if (memcpy_toiovecend(iv
, (void *) &pi
, 0, sizeof(pi
)))
1395 total
+= sizeof(pi
);
1399 struct virtio_net_hdr gso
= { 0 }; /* no info leak */
1400 if ((len
-= vnet_hdr_sz
) < 0)
1403 if (skb_is_gso(skb
)) {
1404 struct skb_shared_info
*sinfo
= skb_shinfo(skb
);
1406 /* This is a hint as to how much should be linear. */
1407 gso
.hdr_len
= skb_headlen(skb
);
1408 gso
.gso_size
= sinfo
->gso_size
;
1409 if (sinfo
->gso_type
& SKB_GSO_TCPV4
)
1410 gso
.gso_type
= VIRTIO_NET_HDR_GSO_TCPV4
;
1411 else if (sinfo
->gso_type
& SKB_GSO_TCPV6
)
1412 gso
.gso_type
= VIRTIO_NET_HDR_GSO_TCPV6
;
1413 else if (sinfo
->gso_type
& SKB_GSO_UDP
)
1414 gso
.gso_type
= VIRTIO_NET_HDR_GSO_UDP
;
1416 pr_err("unexpected GSO type: "
1417 "0x%x, gso_size %d, hdr_len %d\n",
1418 sinfo
->gso_type
, gso
.gso_size
,
1420 print_hex_dump(KERN_ERR
, "tun: ",
1423 min((int)gso
.hdr_len
, 64), true);
1427 if (sinfo
->gso_type
& SKB_GSO_TCP_ECN
)
1428 gso
.gso_type
|= VIRTIO_NET_HDR_GSO_ECN
;
1430 gso
.gso_type
= VIRTIO_NET_HDR_GSO_NONE
;
1432 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1433 gso
.flags
= VIRTIO_NET_HDR_F_NEEDS_CSUM
;
1434 gso
.csum_start
= skb_checksum_start_offset(skb
);
1435 gso
.csum_offset
= skb
->csum_offset
;
1436 } else if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
1437 gso
.flags
= VIRTIO_NET_HDR_F_DATA_VALID
;
1438 } /* else everything is zero */
1440 if (unlikely(memcpy_toiovecend(iv
, (void *)&gso
, total
,
1443 total
+= vnet_hdr_sz
;
1446 // ------------- START of KNOX_VPN ------------------//
1447 if (knoxvpn_process_uidpid(tun
, skb
, iv
, &len
, &total
) < 0) {
1450 // ------------- END of KNOX_VPN ------------------//
1452 len
= min_t(int, skb
->len
, len
);
1454 skb_copy_datagram_const_iovec(skb
, 0, iv
, total
, len
);
1457 tun
->dev
->stats
.tx_packets
++;
1458 tun
->dev
->stats
.tx_bytes
+= len
;
1463 static ssize_t
tun_do_read(struct tun_struct
*tun
, struct tun_file
*tfile
,
1464 struct kiocb
*iocb
, const struct iovec
*iv
,
1465 ssize_t len
, int noblock
)
1467 DECLARE_WAITQUEUE(wait
, current
);
1468 struct sk_buff
*skb
;
1471 tun_debug(KERN_INFO
, tun
, "tun_do_read\n");
1473 if (unlikely(!noblock
))
1474 add_wait_queue(&tfile
->wq
.wait
, &wait
);
1476 current
->state
= TASK_INTERRUPTIBLE
;
1478 /* Read frames from the queue */
1479 if (!(skb
= skb_dequeue(&tfile
->socket
.sk
->sk_receive_queue
))) {
1484 if (signal_pending(current
)) {
1488 if (tun
->dev
->reg_state
!= NETREG_REGISTERED
) {
1493 /* Nothing to read, let's sleep */
1498 ret
= tun_put_user(tun
, tfile
, skb
, iv
, len
);
1503 current
->state
= TASK_RUNNING
;
1504 if (unlikely(!noblock
))
1505 remove_wait_queue(&tfile
->wq
.wait
, &wait
);
1510 static ssize_t
tun_chr_aio_read(struct kiocb
*iocb
, const struct iovec
*iv
,
1511 unsigned long count
, loff_t pos
)
1513 struct file
*file
= iocb
->ki_filp
;
1514 struct tun_file
*tfile
= file
->private_data
;
1515 struct tun_struct
*tun
= __tun_get(tfile
);
1520 len
= iov_length(iv
, count
);
1526 ret
= tun_do_read(tun
, tfile
, iocb
, iv
, len
,
1527 file
->f_flags
& O_NONBLOCK
);
1528 ret
= min_t(ssize_t
, ret
, len
);
1536 static void tun_free_netdev(struct net_device
*dev
)
1538 struct tun_struct
*tun
= netdev_priv(dev
);
1540 BUG_ON(!(list_empty(&tun
->disabled
)));
1541 tun_flow_uninit(tun
);
1542 security_tun_dev_free_security(tun
->security
);
1546 static void tun_setup(struct net_device
*dev
)
1548 struct tun_struct
*tun
= netdev_priv(dev
);
1550 tun
->owner
= INVALID_UID
;
1551 tun
->group
= INVALID_GID
;
1553 dev
->ethtool_ops
= &tun_ethtool_ops
;
1554 dev
->destructor
= tun_free_netdev
;
1557 /* Trivial set of netlink ops to allow deleting tun or tap
1558 * device with netlink.
1560 static int tun_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
1565 static struct rtnl_link_ops tun_link_ops __read_mostly
= {
1567 .priv_size
= sizeof(struct tun_struct
),
1569 .validate
= tun_validate
,
1572 static void tun_sock_write_space(struct sock
*sk
)
1574 struct tun_file
*tfile
;
1575 wait_queue_head_t
*wqueue
;
1577 if (!sock_writeable(sk
))
1580 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
))
1583 wqueue
= sk_sleep(sk
);
1584 if (wqueue
&& waitqueue_active(wqueue
))
1585 wake_up_interruptible_sync_poll(wqueue
, POLLOUT
|
1586 POLLWRNORM
| POLLWRBAND
);
1588 tfile
= container_of(sk
, struct tun_file
, sk
);
1589 kill_fasync(&tfile
->fasync
, SIGIO
, POLL_OUT
);
1592 static int tun_sendmsg(struct kiocb
*iocb
, struct socket
*sock
,
1593 struct msghdr
*m
, size_t total_len
)
1596 struct tun_file
*tfile
= container_of(sock
, struct tun_file
, socket
);
1597 struct tun_struct
*tun
= __tun_get(tfile
);
1601 ret
= tun_get_user(tun
, tfile
, m
->msg_control
, m
->msg_iov
, total_len
,
1602 m
->msg_iovlen
, m
->msg_flags
& MSG_DONTWAIT
);
1608 static int tun_recvmsg(struct kiocb
*iocb
, struct socket
*sock
,
1609 struct msghdr
*m
, size_t total_len
,
1612 struct tun_file
*tfile
= container_of(sock
, struct tun_file
, socket
);
1613 struct tun_struct
*tun
= __tun_get(tfile
);
1619 if (flags
& ~(MSG_DONTWAIT
|MSG_TRUNC
)) {
1623 ret
= tun_do_read(tun
, tfile
, iocb
, m
->msg_iov
, total_len
,
1624 flags
& MSG_DONTWAIT
);
1625 if (ret
> total_len
) {
1626 m
->msg_flags
|= MSG_TRUNC
;
1627 ret
= flags
& MSG_TRUNC
? ret
: total_len
;
1634 static int tun_release(struct socket
*sock
)
1641 /* Ops structure to mimic raw sockets with tun */
1642 static const struct proto_ops tun_socket_ops
= {
1643 .sendmsg
= tun_sendmsg
,
1644 .recvmsg
= tun_recvmsg
,
1645 .release
= tun_release
,
1648 static struct proto tun_proto
= {
1650 .owner
= THIS_MODULE
,
1651 .obj_size
= sizeof(struct tun_file
),
1654 static int tun_flags(struct tun_struct
*tun
)
1658 // ------------- START of KNOX_VPN ------------------//
1659 /* Checks if the meta header is enabled so that
1660 * packets will be prepended with meta data (UID/PID).
1662 if (tun
->flags
& TUN_META_HDR
) {
1663 flags
|= IFF_META_HDR
;
1665 // ------------- END of KNOX_VPN -------------------//
1667 if (tun
->flags
& TUN_TUN_DEV
)
1672 if (tun
->flags
& TUN_NO_PI
)
1675 /* This flag has no real effect. We track the value for backwards compatibility. */
1678 if (tun
->flags
& TUN_ONE_QUEUE
)
1679 flags
|= IFF_ONE_QUEUE
;
1681 if (tun
->flags
& TUN_VNET_HDR
)
1682 flags
|= IFF_VNET_HDR
;
1684 if (tun
->flags
& TUN_TAP_MQ
)
1685 flags
|= IFF_MULTI_QUEUE
;
1690 static ssize_t
tun_show_flags(struct device
*dev
, struct device_attribute
*attr
,
1693 struct tun_struct
*tun
= netdev_priv(to_net_dev(dev
));
1694 return sprintf(buf
, "0x%x\n", tun_flags(tun
));
1697 static ssize_t
tun_show_owner(struct device
*dev
, struct device_attribute
*attr
,
1700 struct tun_struct
*tun
= netdev_priv(to_net_dev(dev
));
1701 return uid_valid(tun
->owner
)?
1702 sprintf(buf
, "%u\n",
1703 from_kuid_munged(current_user_ns(), tun
->owner
)):
1704 sprintf(buf
, "-1\n");
1707 static ssize_t
tun_show_group(struct device
*dev
, struct device_attribute
*attr
,
1710 struct tun_struct
*tun
= netdev_priv(to_net_dev(dev
));
1711 return gid_valid(tun
->group
) ?
1712 sprintf(buf
, "%u\n",
1713 from_kgid_munged(current_user_ns(), tun
->group
)):
1714 sprintf(buf
, "-1\n");
1717 static DEVICE_ATTR(tun_flags
, 0444, tun_show_flags
, NULL
);
1718 static DEVICE_ATTR(owner
, 0444, tun_show_owner
, NULL
);
1719 static DEVICE_ATTR(group
, 0444, tun_show_group
, NULL
);
1721 static int tun_set_iff(struct net
*net
, struct file
*file
, struct ifreq
*ifr
)
1723 struct tun_struct
*tun
;
1724 struct tun_file
*tfile
= file
->private_data
;
1725 struct net_device
*dev
;
1728 if (tfile
->detached
)
1731 dev
= __dev_get_by_name(net
, ifr
->ifr_name
);
1733 if (ifr
->ifr_flags
& IFF_TUN_EXCL
)
1735 if ((ifr
->ifr_flags
& IFF_TUN
) && dev
->netdev_ops
== &tun_netdev_ops
)
1736 tun
= netdev_priv(dev
);
1737 else if ((ifr
->ifr_flags
& IFF_TAP
) && dev
->netdev_ops
== &tap_netdev_ops
)
1738 tun
= netdev_priv(dev
);
1742 if (!!(ifr
->ifr_flags
& IFF_MULTI_QUEUE
) !=
1743 !!(tun
->flags
& TUN_TAP_MQ
))
1746 if (tun_not_capable(tun
))
1748 err
= security_tun_dev_open(tun
->security
);
1752 err
= tun_attach(tun
, file
);
1756 if (tun
->flags
& TUN_TAP_MQ
&&
1757 (tun
->numqueues
+ tun
->numdisabled
> 1)) {
1758 /* One or more queues have already been attached, no need
1759 * to initialize the device again.
1766 unsigned long flags
= 0;
1767 int queues
= ifr
->ifr_flags
& IFF_MULTI_QUEUE
?
1770 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1772 err
= security_tun_dev_create();
1777 if (ifr
->ifr_flags
& IFF_TUN
) {
1779 flags
|= TUN_TUN_DEV
;
1781 } else if (ifr
->ifr_flags
& IFF_TAP
) {
1783 flags
|= TUN_TAP_DEV
;
1789 name
= ifr
->ifr_name
;
1791 dev
= alloc_netdev_mqs(sizeof(struct tun_struct
), name
,
1792 tun_setup
, queues
, queues
);
1797 dev_net_set(dev
, net
);
1798 dev
->rtnl_link_ops
= &tun_link_ops
;
1800 tun
= netdev_priv(dev
);
1803 tun
->txflt
.count
= 0;
1804 tun
->vnet_hdr_sz
= sizeof(struct virtio_net_hdr
);
1806 tun
->filter_attached
= false;
1807 tun
->sndbuf
= tfile
->socket
.sk
->sk_sndbuf
;
1809 spin_lock_init(&tun
->lock
);
1811 err
= security_tun_dev_alloc_security(&tun
->security
);
1817 err
= tun_flow_init(tun
);
1821 dev
->hw_features
= NETIF_F_SG
| NETIF_F_FRAGLIST
|
1823 dev
->features
= dev
->hw_features
;
1824 dev
->vlan_features
= dev
->features
;
1826 INIT_LIST_HEAD(&tun
->disabled
);
1827 err
= tun_attach(tun
, file
);
1831 err
= register_netdevice(tun
->dev
);
1835 if (device_create_file(&tun
->dev
->dev
, &dev_attr_tun_flags
) ||
1836 device_create_file(&tun
->dev
->dev
, &dev_attr_owner
) ||
1837 device_create_file(&tun
->dev
->dev
, &dev_attr_group
))
1838 pr_err("Failed to create tun sysfs files\n");
1841 netif_carrier_on(tun
->dev
);
1843 tun_debug(KERN_INFO
, tun
, "tun_set_iff\n");
1845 // ------------- START of KNOX_VPN ------------------//
1846 if (ifr
->ifr_flags
& IFF_META_HDR
) {
1847 tun
->flags
|= TUN_META_HDR
;
1849 tun
->flags
&= ~TUN_META_HDR
;
1851 // ------------- END of KNOX_VPN -------------------//
1853 if (ifr
->ifr_flags
& IFF_NO_PI
)
1854 tun
->flags
|= TUN_NO_PI
;
1856 tun
->flags
&= ~TUN_NO_PI
;
1858 /* This flag has no real effect. We track the value for backwards compatibility. */
1861 if (ifr
->ifr_flags
& IFF_ONE_QUEUE
)
1862 tun
->flags
|= TUN_ONE_QUEUE
;
1864 tun
->flags
&= ~TUN_ONE_QUEUE
;
1866 if (ifr
->ifr_flags
& IFF_VNET_HDR
)
1867 tun
->flags
|= TUN_VNET_HDR
;
1869 tun
->flags
&= ~TUN_VNET_HDR
;
1871 if (ifr
->ifr_flags
& IFF_MULTI_QUEUE
)
1872 tun
->flags
|= TUN_TAP_MQ
;
1874 tun
->flags
&= ~TUN_TAP_MQ
;
1876 /* Make sure persistent devices do not get stuck in xoff state. */
1879 if (netif_running(tun
->dev
))
1880 netif_tx_wake_all_queues(tun
->dev
);
1882 strcpy(ifr
->ifr_name
, tun
->dev
->name
);
1886 tun_detach_all(dev
);
1888 tun_flow_uninit(tun
);
1889 security_tun_dev_free_security(tun
->security
);
1895 static void tun_get_iff(struct net
*net
, struct tun_struct
*tun
,
1898 tun_debug(KERN_INFO
, tun
, "tun_get_iff\n");
1900 strcpy(ifr
->ifr_name
, tun
->dev
->name
);
1902 ifr
->ifr_flags
= tun_flags(tun
);
1906 /* This is like a cut-down ethtool ops, except done via tun fd so no
1907 * privs required. */
1908 static int set_offload(struct tun_struct
*tun
, unsigned long arg
)
1910 netdev_features_t features
= 0;
1912 if (arg
& TUN_F_CSUM
) {
1913 features
|= NETIF_F_HW_CSUM
;
1916 if (arg
& (TUN_F_TSO4
|TUN_F_TSO6
)) {
1917 if (arg
& TUN_F_TSO_ECN
) {
1918 features
|= NETIF_F_TSO_ECN
;
1919 arg
&= ~TUN_F_TSO_ECN
;
1921 if (arg
& TUN_F_TSO4
)
1922 features
|= NETIF_F_TSO
;
1923 if (arg
& TUN_F_TSO6
)
1924 features
|= NETIF_F_TSO6
;
1925 arg
&= ~(TUN_F_TSO4
|TUN_F_TSO6
);
1928 if (arg
& TUN_F_UFO
) {
1929 features
|= NETIF_F_UFO
;
1934 /* This gives the user a way to test for new features in the future by
1935 * trying to set them. */
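/* Illustrative userspace call (assumption): enable checksum and TCPv4/v6
 * segmentation offload on a tap fd. Unknown flag bits make the ioctl fail,
 * which is what the comment above relies on for feature probing.
 *
 *	ioctl(tap_fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 */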
1939 tun
->set_features
= features
;
1940 netdev_update_features(tun
->dev
);
1945 static void tun_detach_filter(struct tun_struct
*tun
, int n
)
1948 struct tun_file
*tfile
;
1950 for (i
= 0; i
< n
; i
++) {
1951 tfile
= rtnl_dereference(tun
->tfiles
[i
]);
1952 sk_detach_filter(tfile
->socket
.sk
);
1955 tun
->filter_attached
= false;
1958 static int tun_attach_filter(struct tun_struct
*tun
)
1961 struct tun_file
*tfile
;
1963 for (i
= 0; i
< tun
->numqueues
; i
++) {
1964 tfile
= rtnl_dereference(tun
->tfiles
[i
]);
1965 ret
= sk_attach_filter(&tun
->fprog
, tfile
->socket
.sk
);
1967 tun_detach_filter(tun
, i
);
1972 tun
->filter_attached
= true;
1976 static void tun_set_sndbuf(struct tun_struct
*tun
)
1978 struct tun_file
*tfile
;
1981 for (i
= 0; i
< tun
->numqueues
; i
++) {
1982 tfile
= rtnl_dereference(tun
->tfiles
[i
]);
1983 tfile
->socket
.sk
->sk_sndbuf
= tun
->sndbuf
;
1987 static int tun_set_queue(struct file
*file
, struct ifreq
*ifr
)
1989 struct tun_file
*tfile
= file
->private_data
;
1990 struct tun_struct
*tun
;
1995 if (ifr
->ifr_flags
& IFF_ATTACH_QUEUE
) {
1996 tun
= tfile
->detached
;
2001 ret
= security_tun_dev_attach_queue(tun
->security
);
2004 ret
= tun_attach(tun
, file
);
2005 } else if (ifr
->ifr_flags
& IFF_DETACH_QUEUE
) {
2006 tun
= rtnl_dereference(tfile
->tun
);
2007 if (!tun
|| !(tun
->flags
& TUN_TAP_MQ
) || tfile
->detached
)
2010 __tun_detach(tfile
, false);
2019 static long __tun_chr_ioctl(struct file
*file
, unsigned int cmd
,
2020 unsigned long arg
, int ifreq_len
)
2022 struct tun_file
*tfile
= file
->private_data
;
2023 struct tun_struct
*tun
;
2024 void __user
* argp
= (void __user
*)arg
;
2031 // ------------- START of KNOX_VPN ------------------//
2035 // ------------- END of KNOX_VPN -------------------//
2037 #ifdef CONFIG_ANDROID_PARANOID_NETWORK
2038 if (cmd
!= TUNGETIFF
&& !capable(CAP_NET_ADMIN
)) {
2043 if (cmd
== TUNSETIFF
|| cmd
== TUNSETQUEUE
|| _IOC_TYPE(cmd
) == 0x89) {
2044 if (copy_from_user(&ifr
, argp
, ifreq_len
))
2047 memset(&ifr
, 0, sizeof(ifr
));
2049 if (cmd
== TUNGETFEATURES
) {
2050 /* Currently this just means: "what IFF flags are valid?".
2051 * This is needed because we never checked for invalid flags on
2052 * TUNSETIFF. */
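/* Illustrative userspace probe (assumption): query which IFF_* flags this
 * kernel accepts before issuing TUNSETIFF.
 *
 *	unsigned int features;
 *
 *	if (ioctl(fd, TUNGETFEATURES, &features) == 0 &&
 *	    (features & IFF_MULTI_QUEUE))
 *		ifr.ifr_flags |= IFF_MULTI_QUEUE;
 */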
2053 // ------------- START of KNOX_VPN ------------------//
2054 knox_flag
|= IFF_META_HDR
;
2055 return put_user(IFF_TUN
| IFF_TAP
| IFF_NO_PI
| IFF_ONE_QUEUE
|
2056 IFF_VNET_HDR
| IFF_MULTI_QUEUE
| knox_flag
,
2057 (unsigned int __user
*)argp
);
2058 // ------------- END of KNOX_VPN -------------------//
2059 } else if (cmd
== TUNSETQUEUE
)
2060 return tun_set_queue(file
, &ifr
);
2065 tun
= __tun_get(tfile
);
2066 if (cmd
== TUNSETIFF
&& !tun
) {
2067 ifr
.ifr_name
[IFNAMSIZ
-1] = '\0';
2069 ret
= tun_set_iff(tfile
->net
, file
, &ifr
);
2074 if (copy_to_user(argp
, &ifr
, ifreq_len
))
2083 tun_debug(KERN_INFO
, tun
, "tun_chr_ioctl cmd %u\n", cmd
);
2088 tun_get_iff(current
->nsproxy
->net_ns
, tun
, &ifr
);
2090 if (copy_to_user(argp
, &ifr
, ifreq_len
))
2095 /* Disable/Enable checksum */
2097 /* [unimplemented] */
2098 tun_debug(KERN_INFO
, tun
, "ignored: set checksum %s\n",
2099 arg
? "disabled" : "enabled");
2103 /* Disable/Enable persist mode. Keep an extra reference to the
2104 * module to prevent it from being unloaded.
2106 if (arg
&& !(tun
->flags
& TUN_PERSIST
)) {
2107 tun
->flags
|= TUN_PERSIST
;
2108 __module_get(THIS_MODULE
);
2110 if (!arg
&& (tun
->flags
& TUN_PERSIST
)) {
2111 tun
->flags
&= ~TUN_PERSIST
;
2112 module_put(THIS_MODULE
);
2115 tun_debug(KERN_INFO
, tun
, "persist %s\n",
2116 arg
? "enabled" : "disabled");
2120 /* Set owner of the device */
2121 owner
= make_kuid(current_user_ns(), arg
);
2122 if (!uid_valid(owner
)) {
2127 tun_debug(KERN_INFO
, tun
, "owner set to %u\n",
2128 from_kuid(&init_user_ns
, tun
->owner
));
2132 /* Set group of the device */
2133 group
= make_kgid(current_user_ns(), arg
);
2134 if (!gid_valid(group
)) {
2139 tun_debug(KERN_INFO
, tun
, "group set to %u\n",
2140 from_kgid(&init_user_ns
, tun
->group
));
2144 /* Only allow setting the type when the interface is down */
2145 if (tun
->dev
->flags
& IFF_UP
) {
2146 tun_debug(KERN_INFO
, tun
,
2147 "Linktype set failed because interface is up\n");
2150 tun
->dev
->type
= (int) arg
;
2151 tun_debug(KERN_INFO
, tun
, "linktype set to %d\n",
2163 ret
= set_offload(tun
, arg
);
2166 case TUNSETTXFILTER
:
2167 /* Can be set only for TAPs */
2169 if ((tun
->flags
& TUN_TYPE_MASK
) != TUN_TAP_DEV
)
2171 ret
= update_filter(&tun
->txflt
, (void __user
*)arg
);
2175 /* Get hw address */
2176 memcpy(ifr
.ifr_hwaddr
.sa_data
, tun
->dev
->dev_addr
, ETH_ALEN
);
2177 ifr
.ifr_hwaddr
.sa_family
= tun
->dev
->type
;
2178 if (copy_to_user(argp
, &ifr
, ifreq_len
))
2183 /* Set hw address */
2184 tun_debug(KERN_DEBUG
, tun
, "set hw address: %pM\n",
2185 ifr
.ifr_hwaddr
.sa_data
);
2187 ret
= dev_set_mac_address(tun
->dev
, &ifr
.ifr_hwaddr
);
2191 sndbuf
= tfile
->socket
.sk
->sk_sndbuf
;
2192 if (copy_to_user(argp
, &sndbuf
, sizeof(sndbuf
)))
2197 if (copy_from_user(&sndbuf
, argp
, sizeof(sndbuf
))) {
2202 tun
->sndbuf
= sndbuf
;
2203 tun_set_sndbuf(tun
);
2206 case TUNGETVNETHDRSZ
:
2207 vnet_hdr_sz
= tun
->vnet_hdr_sz
;
2208 if (copy_to_user(argp
, &vnet_hdr_sz
, sizeof(vnet_hdr_sz
)))
2212 case TUNSETVNETHDRSZ
:
2213 if (copy_from_user(&vnet_hdr_sz
, argp
, sizeof(vnet_hdr_sz
))) {
2217 if (vnet_hdr_sz
< (int)sizeof(struct virtio_net_hdr
)) {
2222 tun
->vnet_hdr_sz
= vnet_hdr_sz
;
2224 // ------------- START of KNOX_VPN ------------------//
2225 case TUNGETMETAPARAM
:
2227 if (copy_from_user(&tun_meta_param
, argp
,
2228 sizeof(tun_meta_param
))) {
2234 switch (tun_meta_param
) {
2235 case TUN_GET_META_HDR_SZ
:
2236 tun_meta_value
= TUN_META_HDR_SZ
;
2239 case TUN_GET_META_MARK_OFFSET
:
2240 tun_meta_value
= TUN_META_MARK_OFFSET
;
2249 if (copy_to_user(argp
, &tun_meta_value
,
2250 sizeof(tun_meta_value
)))
2254 // ------------- END of KNOX_VPN -------------------//
2255 case TUNATTACHFILTER
:
2256 /* Can be set only for TAPs */
2258 if ((tun
->flags
& TUN_TYPE_MASK
) != TUN_TAP_DEV
)
2261 if (copy_from_user(&tun
->fprog
, argp
, sizeof(tun
->fprog
)))
2264 ret
= tun_attach_filter(tun
);
2267 case TUNDETACHFILTER
:
2268 /* Can be set only for TAPs */
2270 if ((tun
->flags
& TUN_TYPE_MASK
) != TUN_TAP_DEV
)
2273 tun_detach_filter(tun
, tun
->numqueues
);
2288 static long tun_chr_ioctl(struct file
*file
,
2289 unsigned int cmd
, unsigned long arg
)
2291 return __tun_chr_ioctl(file
, cmd
, arg
, sizeof (struct ifreq
));
2294 #ifdef CONFIG_COMPAT
2295 static long tun_chr_compat_ioctl(struct file
*file
,
2296 unsigned int cmd
, unsigned long arg
)
2301 case TUNSETTXFILTER
:
2306 arg
= (unsigned long)compat_ptr(arg
);
2309 arg
= (compat_ulong_t
)arg
;
2314 * compat_ifreq is shorter than ifreq, so we must not access beyond
2315 * the end of that structure. All fields that are used in this
2316 * driver are compatible though, so we don't need to convert the
2317 * contents. */
2319 return __tun_chr_ioctl(file
, cmd
, arg
, sizeof(struct compat_ifreq
));
2321 #endif /* CONFIG_COMPAT */
2323 static int tun_chr_fasync(int fd
, struct file
*file
, int on
)
2325 struct tun_file
*tfile
= file
->private_data
;
2328 if ((ret
= fasync_helper(fd
, file
, on
, &tfile
->fasync
)) < 0)
2332 ret
= __f_setown(file
, task_pid(current
), PIDTYPE_PID
, 0);
2335 tfile
->flags
|= TUN_FASYNC
;
2337 tfile
->flags
&= ~TUN_FASYNC
;
2343 static int tun_chr_open(struct inode
*inode
, struct file
* file
)
2345 struct tun_file
*tfile
;
2347 DBG1(KERN_INFO
, "tunX: tun_chr_open\n");
2349 tfile
= (struct tun_file
*)sk_alloc(&init_net
, AF_UNSPEC
, GFP_KERNEL
,
2353 rcu_assign_pointer(tfile
->tun
, NULL
);
2354 tfile
->net
= get_net(current
->nsproxy
->net_ns
);
2357 rcu_assign_pointer(tfile
->socket
.wq
, &tfile
->wq
);
2358 init_waitqueue_head(&tfile
->wq
.wait
);
2360 tfile
->socket
.file
= file
;
2361 tfile
->socket
.ops
= &tun_socket_ops
;
2363 sock_init_data(&tfile
->socket
, &tfile
->sk
);
2364 sk_change_net(&tfile
->sk
, tfile
->net
);
2366 tfile
->sk
.sk_write_space
= tun_sock_write_space
;
2367 tfile
->sk
.sk_sndbuf
= INT_MAX
;
2369 file
->private_data
= tfile
;
2370 set_bit(SOCK_EXTERNALLY_ALLOCATED
, &tfile
->socket
.flags
);
2371 INIT_LIST_HEAD(&tfile
->next
);
2373 sock_set_flag(&tfile
->sk
, SOCK_ZEROCOPY
);
2378 static int tun_chr_close(struct inode
*inode
, struct file
*file
)
2380 struct tun_file
*tfile
= file
->private_data
;
2381 struct net
*net
= tfile
->net
;
2383 tun_detach(tfile
, true);
2389 static const struct file_operations tun_fops
= {
2390 .owner
= THIS_MODULE
,
2391 .llseek
= no_llseek
,
2392 .read
= do_sync_read
,
2393 .aio_read
= tun_chr_aio_read
,
2394 .write
= do_sync_write
,
2395 .aio_write
= tun_chr_aio_write
,
2396 .poll
= tun_chr_poll
,
2397 .unlocked_ioctl
= tun_chr_ioctl
,
2398 #ifdef CONFIG_COMPAT
2399 .compat_ioctl
= tun_chr_compat_ioctl
,
2401 .open
= tun_chr_open
,
2402 .release
= tun_chr_close
,
2403 .fasync
= tun_chr_fasync
2406 static struct miscdevice tun_miscdev
= {
2409 .nodename
= "net/tun",
2413 /* ethtool interface */
2415 static int tun_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
2418 cmd
->advertising
= 0;
2419 ethtool_cmd_speed_set(cmd
, SPEED_10
);
2420 cmd
->duplex
= DUPLEX_FULL
;
2421 cmd
->port
= PORT_TP
;
2422 cmd
->phy_address
= 0;
2423 cmd
->transceiver
= XCVR_INTERNAL
;
2424 cmd
->autoneg
= AUTONEG_DISABLE
;
2430 static void tun_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
2432 struct tun_struct
*tun
= netdev_priv(dev
);
2434 strlcpy(info
->driver
, DRV_NAME
, sizeof(info
->driver
));
2435 strlcpy(info
->version
, DRV_VERSION
, sizeof(info
->version
));
2437 switch (tun
->flags
& TUN_TYPE_MASK
) {
2439 strlcpy(info
->bus_info
, "tun", sizeof(info
->bus_info
));
2442 strlcpy(info
->bus_info
, "tap", sizeof(info
->bus_info
));
2447 static u32
tun_get_msglevel(struct net_device
*dev
)
2450 struct tun_struct
*tun
= netdev_priv(dev
);
2457 static void tun_set_msglevel(struct net_device
*dev
, u32 value
)
2460 struct tun_struct
*tun
= netdev_priv(dev
);
2465 static const struct ethtool_ops tun_ethtool_ops
= {
2466 .get_settings
= tun_get_settings
,
2467 .get_drvinfo
= tun_get_drvinfo
,
2468 .get_msglevel
= tun_get_msglevel
,
2469 .set_msglevel
= tun_set_msglevel
,
2470 .get_link
= ethtool_op_get_link
,
2474 static int __init
tun_init(void)
2478 pr_info("%s, %s\n", DRV_DESCRIPTION
, DRV_VERSION
);
2479 pr_info("%s\n", DRV_COPYRIGHT
);
2481 ret
= rtnl_link_register(&tun_link_ops
);
2483 pr_err("Can't register link_ops\n");
2487 ret
= misc_register(&tun_miscdev
);
2489 pr_err("Can't register misc device %d\n", TUN_MINOR
);
2494 rtnl_link_unregister(&tun_link_ops
);
2499 static void tun_cleanup(void)
2501 misc_deregister(&tun_miscdev
);
2502 rtnl_link_unregister(&tun_link_ops
);
2505 /* Get an underlying socket object from tun file. Returns error unless file is
2506 * attached to a device. The returned object works like a packet socket, it
2507 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
2508 * holding a reference to the file for as long as the socket is in use. */
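/* Minimal usage sketch from another kernel module (this mirrors how
 * vhost-net consumes a tun fd; error handling trimmed):
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	// sock can now be used with sock_sendmsg()/sock_recvmsg();
 *	// keep the file reference for as long as the socket is in use.
 */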
2509 struct socket
*tun_get_socket(struct file
*file
)
2511 struct tun_file
*tfile
;
2512 if (file
->f_op
!= &tun_fops
)
2513 return ERR_PTR(-EINVAL
);
2514 tfile
= file
->private_data
;
2516 return ERR_PTR(-EBADFD
);
2517 return &tfile
->socket
;
2519 EXPORT_SYMBOL_GPL(tun_get_socket
);
2521 module_init(tun_init
);
2522 module_exit(tun_cleanup
);
2523 MODULE_DESCRIPTION(DRV_DESCRIPTION
);
2524 MODULE_AUTHOR(DRV_COPYRIGHT
);
2525 MODULE_LICENSE("GPL");
2526 MODULE_ALIAS_MISCDEV(TUN_MINOR
);
2527 MODULE_ALIAS("devname:net/tun");