drivers/net/tun.c
1/*
2 * TUN - Universal TUN/TAP device driver.
3 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16 */
17
18/*
19 * Changes:
20 *
21 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22 * Add TUNSETLINK ioctl to set the link encapsulation
23 *
24 * Mark Smith <markzzzsmith@yahoo.com.au>
25 * Use eth_random_addr() for tap MAC address.
26 *
27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
28 * Fixes in packet dropping, queue length setting and queue wakeup.
29 * Increased default tx queue length.
30 * Added ethtool API.
31 * Minor cleanups
32 *
33 * Daniel Podlejski <underley@underley.eu.org>
34 * Modifications for 2.3.99-pre5 kernel.
35 */
36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
39#define DRV_NAME "tun"
40#define DRV_VERSION "1.6"
41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
42#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
43
44#include <linux/module.h>
45#include <linux/errno.h>
46#include <linux/kernel.h>
47#include <linux/major.h>
48#include <linux/slab.h>
49#include <linux/poll.h>
50#include <linux/fcntl.h>
51#include <linux/init.h>
52#include <linux/skbuff.h>
53#include <linux/netdevice.h>
54#include <linux/etherdevice.h>
55#include <linux/miscdevice.h>
56#include <linux/ethtool.h>
57#include <linux/rtnetlink.h>
58#include <linux/compat.h>
59#include <linux/if.h>
60#include <linux/if_arp.h>
61#include <linux/if_ether.h>
62#include <linux/if_tun.h>
63#include <linux/crc32.h>
64#include <linux/nsproxy.h>
65#include <linux/virtio_net.h>
66#include <linux/rcupdate.h>
67#include <net/net_namespace.h>
68#include <net/netns/generic.h>
69#include <net/rtnetlink.h>
70#include <net/sock.h>
71
72#include <asm/uaccess.h>
73
74/* Uncomment to enable debugging */
75/* #define TUN_DEBUG 1 */
76
77#ifdef TUN_DEBUG
78static int debug;
79
80#define tun_debug(level, tun, fmt, args...) \
81do { \
82 if (tun->debug) \
83 netdev_printk(level, tun->dev, fmt, ##args); \
84} while (0)
85#define DBG1(level, fmt, args...) \
86do { \
87 if (debug == 2) \
88 printk(level fmt, ##args); \
89} while (0)
90#else
91#define tun_debug(level, tun, fmt, args...) \
92do { \
93 if (0) \
94 netdev_printk(level, tun->dev, fmt, ##args); \
95} while (0)
96#define DBG1(level, fmt, args...) \
97do { \
98 if (0) \
99 printk(level fmt, ##args); \
100} while (0)
101#endif
102
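/* GOODCOPY_LEN: number of leading bytes copied linearly into the skb head on
 * the zerocopy transmit path; the remainder of the user buffer is mapped as
 * page fragments (see tun_get_user() and zerocopy_sg_from_iovec() below).
 */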
103#define GOODCOPY_LEN 128
104
105#define FLT_EXACT_COUNT 8
106struct tap_filter {
107 unsigned int count; /* Number of addrs. Zero means disabled */
108 u32 mask[2]; /* Mask of the hashed addrs */
109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
110};
111
112/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for
113 * the netdevice fit in one page, which keeps the memory allocation
114 * reliable. TODO: increase the limit. */
115#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
116#define MAX_TAP_FLOWS 4096
117
118#define TUN_FLOW_EXPIRE (3 * HZ)
119
120/* A tun_file connects an open character device to a tuntap netdevice. It
121 * also contains all socket related structures (except sock_fprog and tap_filter)
122 * to serve as one transmit queue for the tuntap device. The sock_fprog and
123 * tap_filter are kept in tun_struct since they are used for filtering on the
124 * netdevice, not for a specific queue (at least I didn't see the requirement for
125 * this).
126 *
127 * RCU usage:
128 * The tun_file and tun_struct are loosely coupled; the pointer from one to the
129 * other can only be read while rcu_read_lock or rtnl_lock is held.
130 */
131struct tun_file {
132 struct sock sk;
133 struct socket socket;
134 struct socket_wq wq;
135 struct tun_struct __rcu *tun;
136 struct net *net;
137 struct fasync_struct *fasync;
138 /* only used for fasync */
139 unsigned int flags;
140 u16 queue_index;
141 struct list_head next;
142 struct tun_struct *detached;
143};
144
145struct tun_flow_entry {
146 struct hlist_node hash_link;
147 struct rcu_head rcu;
148 struct tun_struct *tun;
149
150 u32 rxhash;
151 int queue_index;
152 unsigned long updated;
153};
154
155#define TUN_NUM_FLOW_ENTRIES 1024
156
157/* Since the socket was moved to tun_file, to preserve the behavior of a persist
158 * device, the socket filter, sndbuf and vnet header size are restored when the
159 * file is attached to a persist device.
160 */
161struct tun_struct {
162 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
163 unsigned int numqueues;
164 unsigned int flags;
165 kuid_t owner;
166 kgid_t group;
167
168 struct net_device *dev;
169 netdev_features_t set_features;
170#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
171 NETIF_F_TSO6|NETIF_F_UFO)
172
173 int vnet_hdr_sz;
174 int sndbuf;
175 struct tap_filter txflt;
176 struct sock_fprog fprog;
177 /* protected by rtnl lock */
178 bool filter_attached;
179#ifdef TUN_DEBUG
180 int debug;
181#endif
182 spinlock_t lock;
183 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
184 struct timer_list flow_gc_timer;
185 unsigned long ageing_time;
186 unsigned int numdisabled;
187 struct list_head disabled;
188 void *security;
189 u32 flow_count;
190};
191
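/* Flow steering: tun_hashfn() folds an skb rxhash into one of the
 * TUN_NUM_FLOW_ENTRIES (1024) buckets of tun->flows, and the tun_flow_*
 * helpers below keep a per-flow record of the queue that last transmitted
 * the flow, which tun_select_queue() uses to pick a txq on the way out.
 */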
192static inline u32 tun_hashfn(u32 rxhash)
193{
194 return rxhash & 0x3ff;
195}
196
197static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
198{
199 struct tun_flow_entry *e;
200
201 hlist_for_each_entry_rcu(e, head, hash_link) {
202 if (e->rxhash == rxhash)
203 return e;
204 }
205 return NULL;
206}
207
208static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
209 struct hlist_head *head,
210 u32 rxhash, u16 queue_index)
211{
212 struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
213
214 if (e) {
215 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
216 rxhash, queue_index);
217 e->updated = jiffies;
218 e->rxhash = rxhash;
219 e->queue_index = queue_index;
220 e->tun = tun;
221 hlist_add_head_rcu(&e->hash_link, head);
222 ++tun->flow_count;
223 }
224 return e;
225}
226
227static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
228{
229 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
230 e->rxhash, e->queue_index);
231 hlist_del_rcu(&e->hash_link);
232 kfree_rcu(e, rcu);
233 --tun->flow_count;
234}
235
236static void tun_flow_flush(struct tun_struct *tun)
237{
238 int i;
239
240 spin_lock_bh(&tun->lock);
241 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
242 struct tun_flow_entry *e;
243 struct hlist_node *n;
244
245 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
246 tun_flow_delete(tun, e);
247 }
248 spin_unlock_bh(&tun->lock);
249}
250
251static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
252{
253 int i;
254
255 spin_lock_bh(&tun->lock);
256 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
257 struct tun_flow_entry *e;
258 struct hlist_node *n;
259
260 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
261 if (e->queue_index == queue_index)
262 tun_flow_delete(tun, e);
263 }
264 }
265 spin_unlock_bh(&tun->lock);
266}
267
268static void tun_flow_cleanup(unsigned long data)
269{
270 struct tun_struct *tun = (struct tun_struct *)data;
271 unsigned long delay = tun->ageing_time;
272 unsigned long next_timer = jiffies + delay;
273 unsigned long count = 0;
274 int i;
275
276 tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
277
278 spin_lock_bh(&tun->lock);
279 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
280 struct tun_flow_entry *e;
281 struct hlist_node *n;
282
283 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
284 unsigned long this_timer;
285 count++;
286 this_timer = e->updated + delay;
287 if (time_before_eq(this_timer, jiffies))
288 tun_flow_delete(tun, e);
289 else if (time_before(this_timer, next_timer))
290 next_timer = this_timer;
291 }
292 }
293
294 if (count)
295 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
296 spin_unlock_bh(&tun->lock);
297}
298
299static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
300 struct tun_file *tfile)
301{
302 struct hlist_head *head;
303 struct tun_flow_entry *e;
304 unsigned long delay = tun->ageing_time;
305 u16 queue_index = tfile->queue_index;
306
307 if (!rxhash)
308 return;
309 else
310 head = &tun->flows[tun_hashfn(rxhash)];
311
312 rcu_read_lock();
313
314 /* There is a very small chance of out-of-order delivery while a flow is
315 * switching queues; not worth optimizing for. */
316 if (tun->numqueues == 1 || tfile->detached)
317 goto unlock;
318
319 e = tun_flow_find(head, rxhash);
320 if (likely(e)) {
321 /* TODO: keep queueing to old queue until it's empty? */
322 e->queue_index = queue_index;
323 e->updated = jiffies;
324 } else {
325 spin_lock_bh(&tun->lock);
326 if (!tun_flow_find(head, rxhash) &&
327 tun->flow_count < MAX_TAP_FLOWS)
328 tun_flow_create(tun, head, rxhash, queue_index);
329
330 if (!timer_pending(&tun->flow_gc_timer))
331 mod_timer(&tun->flow_gc_timer,
332 round_jiffies_up(jiffies + delay));
333 spin_unlock_bh(&tun->lock);
334 }
335
336unlock:
337 rcu_read_unlock();
338}
339
340/* We try to identify a flow through its rxhash first. The reason that
341 * we do not check rxq no. is because some cards (e.g. 82599) choose
342 * the rxq based on the txq where the last packet of the flow comes. As
343 * the userspace application moves between processors, we may get a
344 * different rxq no. here. If we could not get rxhash, then we would
345 * hope the rxq no. may help here.
346 */
347static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
348{
349 struct tun_struct *tun = netdev_priv(dev);
350 struct tun_flow_entry *e;
351 u32 txq = 0;
352 u32 numqueues = 0;
353
354 rcu_read_lock();
355 numqueues = ACCESS_ONCE(tun->numqueues);
356
357 txq = skb_get_rxhash(skb);
358 if (txq) {
359 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
360 if (e)
361 txq = e->queue_index;
362 else
363 /* use multiply and shift instead of expensive divide */
364 txq = ((u64)txq * numqueues) >> 32;
365 } else if (likely(skb_rx_queue_recorded(skb))) {
366 txq = skb_get_rx_queue(skb);
367 while (unlikely(txq >= numqueues))
368 txq -= numqueues;
369 }
370
371 rcu_read_unlock();
372 return txq;
373}
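/* Note on the scaling above: ((u64)hash * numqueues) >> 32 maps a 32-bit
 * hash uniformly onto [0, numqueues) without a divide, e.g. with
 * numqueues == 4 a hash of 0x80000000 lands on queue 2.
 */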
374
375static inline bool tun_not_capable(struct tun_struct *tun)
376{
377 const struct cred *cred = current_cred();
378 struct net *net = dev_net(tun->dev);
379
380 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
381 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
382 !ns_capable(net->user_ns, CAP_NET_ADMIN);
383}
384
385static void tun_set_real_num_queues(struct tun_struct *tun)
386{
387 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
388 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
389}
390
391static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
392{
393 tfile->detached = tun;
394 list_add_tail(&tfile->next, &tun->disabled);
395 ++tun->numdisabled;
396}
397
398static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
399{
400 struct tun_struct *tun = tfile->detached;
401
402 tfile->detached = NULL;
403 list_del_init(&tfile->next);
404 --tun->numdisabled;
405 return tun;
406}
407
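/* __tun_detach(): with clean == false the queue is only disabled (parked on
 * tun->disabled so it can be re-attached later); with clean == true the file
 * drops its reference and, for a non-persistent device with no queues left,
 * the netdevice is unregistered.
 */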
408static void __tun_detach(struct tun_file *tfile, bool clean)
409{
410 struct tun_file *ntfile;
411 struct tun_struct *tun;
412
413 tun = rtnl_dereference(tfile->tun);
414
415 if (tun && !tfile->detached) {
416 u16 index = tfile->queue_index;
417 BUG_ON(index >= tun->numqueues);
418
419 rcu_assign_pointer(tun->tfiles[index],
420 tun->tfiles[tun->numqueues - 1]);
421 ntfile = rtnl_dereference(tun->tfiles[index]);
422 ntfile->queue_index = index;
423
424 --tun->numqueues;
425 if (clean) {
426 rcu_assign_pointer(tfile->tun, NULL);
427 sock_put(&tfile->sk);
428 } else
429 tun_disable_queue(tun, tfile);
430
431 synchronize_net();
432 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
433 /* Drop read queue */
434 skb_queue_purge(&tfile->sk.sk_receive_queue);
435 tun_set_real_num_queues(tun);
436 } else if (tfile->detached && clean) {
437 tun = tun_enable_queue(tfile);
438 sock_put(&tfile->sk);
439 }
440
441 if (clean) {
442 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
443 netif_carrier_off(tun->dev);
444
445 if (!(tun->flags & TUN_PERSIST) &&
446 tun->dev->reg_state == NETREG_REGISTERED)
447 unregister_netdevice(tun->dev);
448 }
449
450 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
451 &tfile->socket.flags));
452 sk_release_kernel(&tfile->sk);
453 }
454}
455
456static void tun_detach(struct tun_file *tfile, bool clean)
457{
458 rtnl_lock();
459 __tun_detach(tfile, clean);
460 rtnl_unlock();
461}
462
463static void tun_detach_all(struct net_device *dev)
464{
465 struct tun_struct *tun = netdev_priv(dev);
466 struct tun_file *tfile, *tmp;
467 int i, n = tun->numqueues;
468
469 for (i = 0; i < n; i++) {
470 tfile = rtnl_dereference(tun->tfiles[i]);
471 BUG_ON(!tfile);
472 wake_up_all(&tfile->wq.wait);
473 rcu_assign_pointer(tfile->tun, NULL);
474 --tun->numqueues;
475 }
476 list_for_each_entry(tfile, &tun->disabled, next) {
477 wake_up_all(&tfile->wq.wait);
478 rcu_assign_pointer(tfile->tun, NULL);
479 }
480 BUG_ON(tun->numqueues != 0);
481
482 synchronize_net();
483 for (i = 0; i < n; i++) {
484 tfile = rtnl_dereference(tun->tfiles[i]);
485 /* Drop read queue */
486 skb_queue_purge(&tfile->sk.sk_receive_queue);
487 sock_put(&tfile->sk);
488 }
489 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
490 tun_enable_queue(tfile);
491 skb_queue_purge(&tfile->sk.sk_receive_queue);
492 sock_put(&tfile->sk);
493 }
494 BUG_ON(tun->numdisabled != 0);
495
496 if (tun->flags & TUN_PERSIST)
497 module_put(THIS_MODULE);
498}
499
500static int tun_attach(struct tun_struct *tun, struct file *file)
501{
502 struct tun_file *tfile = file->private_data;
503 int err;
504
505 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
506 if (err < 0)
507 goto out;
508
509 err = -EINVAL;
510 if (rtnl_dereference(tfile->tun) && !tfile->detached)
511 goto out;
512
513 err = -EBUSY;
514 if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
515 goto out;
516
517 err = -E2BIG;
518 if (!tfile->detached &&
519 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
520 goto out;
521
522 err = 0;
523
524 /* Re-attach the filter to the persist device */
525 if (tun->filter_attached == true) {
526 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
527 if (!err)
528 goto out;
529 }
530 tfile->queue_index = tun->numqueues;
531 rcu_assign_pointer(tfile->tun, tun);
532 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
533 tun->numqueues++;
534
535 if (tfile->detached)
536 tun_enable_queue(tfile);
537 else
538 sock_hold(&tfile->sk);
539
540 tun_set_real_num_queues(tun);
541
542 /* device is allowed to go away first, so no need to hold extra
543 * refcnt.
544 */
545
546out:
547 return err;
548}
549
550static struct tun_struct *__tun_get(struct tun_file *tfile)
551{
552 struct tun_struct *tun;
553
554 rcu_read_lock();
555 tun = rcu_dereference(tfile->tun);
556 if (tun)
557 dev_hold(tun->dev);
558 rcu_read_unlock();
559
560 return tun;
561}
562
563static struct tun_struct *tun_get(struct file *file)
564{
565 return __tun_get(file->private_data);
566}
567
568static void tun_put(struct tun_struct *tun)
569{
570 dev_put(tun->dev);
571}
572
573/* TAP filtering */
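/* The TUNSETTXFILTER address filter keeps up to FLT_EXACT_COUNT exact MAC
 * matches; any further addresses must be multicast and are folded into a
 * 64-bit hash mask (addr_hash_set/addr_hash_test), otherwise the filter is
 * left disabled and everything is accepted.
 */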
574static void addr_hash_set(u32 *mask, const u8 *addr)
575{
576 int n = ether_crc(ETH_ALEN, addr) >> 26;
577 mask[n >> 5] |= (1 << (n & 31));
578}
579
580static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
581{
582 int n = ether_crc(ETH_ALEN, addr) >> 26;
583 return mask[n >> 5] & (1 << (n & 31));
584}
585
586static int update_filter(struct tap_filter *filter, void __user *arg)
587{
588 struct { u8 u[ETH_ALEN]; } *addr;
589 struct tun_filter uf;
590 int err, alen, n, nexact;
591
592 if (copy_from_user(&uf, arg, sizeof(uf)))
593 return -EFAULT;
594
595 if (!uf.count) {
596 /* Disabled */
597 filter->count = 0;
598 return 0;
599 }
600
601 alen = ETH_ALEN * uf.count;
602 addr = kmalloc(alen, GFP_KERNEL);
603 if (!addr)
604 return -ENOMEM;
605
606 if (copy_from_user(addr, arg + sizeof(uf), alen)) {
607 err = -EFAULT;
608 goto done;
609 }
610
611 /* The filter is updated without holding any locks. Which is
612 * perfectly safe. We disable it first and in the worst
613 * case we'll accept a few undesired packets. */
614 filter->count = 0;
615 wmb();
616
617 /* Use first set of addresses as an exact filter */
618 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
619 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
620
621 nexact = n;
622
623 /* Remaining multicast addresses are hashed,
624 * unicast will leave the filter disabled. */
625 memset(filter->mask, 0, sizeof(filter->mask));
626 for (; n < uf.count; n++) {
627 if (!is_multicast_ether_addr(addr[n].u)) {
628 err = 0; /* no filter */
629 goto done;
630 }
631 addr_hash_set(filter->mask, addr[n].u);
632 }
633
634 /* For ALLMULTI just set the mask to all ones.
635 * This overrides the mask populated above. */
636 if ((uf.flags & TUN_FLT_ALLMULTI))
637 memset(filter->mask, ~0, sizeof(filter->mask));
638
639 /* Now enable the filter */
640 wmb();
641 filter->count = nexact;
642
643 /* Return the number of exact filters */
644 err = nexact;
645
646done:
647 kfree(addr);
648 return err;
649}
650
651/* Returns: 0 - drop, !=0 - accept */
652static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
653{
654 /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
655 * at this point. */
656 struct ethhdr *eh = (struct ethhdr *) skb->data;
657 int i;
658
659 /* Exact match */
660 for (i = 0; i < filter->count; i++)
661 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
662 return 1;
663
664 /* Inexact match (multicast only) */
665 if (is_multicast_ether_addr(eh->h_dest))
666 return addr_hash_test(filter->mask, eh->h_dest);
667
668 return 0;
669}
670
671/*
672 * Checks whether the packet is accepted or not.
673 * Returns: 0 - drop, !=0 - accept
674 */
675static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
676{
677 if (!filter->count)
678 return 1;
679
680 return run_filter(filter, skb);
681}
682
683/* Network device part of the driver */
684
685static const struct ethtool_ops tun_ethtool_ops;
686
687/* Net device detach from fd. */
688static void tun_net_uninit(struct net_device *dev)
689{
690 tun_detach_all(dev);
691}
692
693/* Net device open. */
694static int tun_net_open(struct net_device *dev)
695{
696 netif_tx_start_all_queues(dev);
697 return 0;
698}
699
700/* Net device close. */
701static int tun_net_close(struct net_device *dev)
702{
703 netif_tx_stop_all_queues(dev);
704 return 0;
705}
706
707/* Net device start xmit */
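/* Frames transmitted by the stack are appended to the selected queue's
 * socket receive queue and the reader is woken; the frame is dropped when
 * the per-queue backlog exceeds tx_queue_len / numqueues or when the TAP
 * address filter or the attached socket filter rejects it.
 */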
708static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
709{
710 struct tun_struct *tun = netdev_priv(dev);
711 int txq = skb->queue_mapping;
712 struct tun_file *tfile;
713
714 rcu_read_lock();
715 tfile = rcu_dereference(tun->tfiles[txq]);
716
717 /* Drop packet if interface is not attached */
718 if (txq >= tun->numqueues)
719 goto drop;
720
721 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
722
723 BUG_ON(!tfile);
724
725 /* Drop if the filter does not like it.
726 * This is a noop if the filter is disabled.
727 * Filter can be enabled only for the TAP devices. */
728 if (!check_filter(&tun->txflt, skb))
729 goto drop;
730
731 if (tfile->socket.sk->sk_filter &&
732 sk_filter(tfile->socket.sk, skb))
733 goto drop;
734
735 /* Limit the number of packets queued by dividing the txq length by the
736 * number of queues.
737 */
738 if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
739 >= dev->tx_queue_len / tun->numqueues)
740 goto drop;
741
742 /* Orphan the skb - required as we might hang on to it
743 * for indefinite time. */
744 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
745 goto drop;
746 skb_orphan(skb);
747
748 nf_reset(skb);
749
750 /* Enqueue packet */
751 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
752
753 /* Notify and wake up reader process */
754 if (tfile->flags & TUN_FASYNC)
755 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
756 wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
757 POLLRDNORM | POLLRDBAND);
758
759 rcu_read_unlock();
760 return NETDEV_TX_OK;
761
762drop:
763 dev->stats.tx_dropped++;
764 skb_tx_error(skb);
765 kfree_skb(skb);
766 rcu_read_unlock();
767 return NETDEV_TX_OK;
768}
769
770static void tun_net_mclist(struct net_device *dev)
771{
772 /*
773 * This callback is supposed to deal with mc filter in
774 * _rx_ path and has nothing to do with the _tx_ path.
775 * In rx path we always accept everything userspace gives us.
776 */
777}
778
779#define MIN_MTU 68
780#define MAX_MTU 65535
781
782static int
783tun_net_change_mtu(struct net_device *dev, int new_mtu)
784{
785 if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
786 return -EINVAL;
787 dev->mtu = new_mtu;
788 return 0;
789}
790
791static netdev_features_t tun_net_fix_features(struct net_device *dev,
792 netdev_features_t features)
793{
794 struct tun_struct *tun = netdev_priv(dev);
795
796 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
797}
798#ifdef CONFIG_NET_POLL_CONTROLLER
799static void tun_poll_controller(struct net_device *dev)
800{
801 /*
802 * Tun only receives frames when:
803 * 1) the char device endpoint gets data from user space
804 * 2) the tun socket gets a sendmsg call from user space
805 * Since both of those are synchronous operations, we are guaranteed
806 * never to have pending data when we poll for it,
807 * so there's nothing to do here but return.
808 * We need this though so netpoll recognizes us as an interface that
809 * supports polling, which enables bridge devices in virt setups to
810 * still use netconsole
811 */
812 return;
813}
814#endif
815static const struct net_device_ops tun_netdev_ops = {
816 .ndo_uninit = tun_net_uninit,
817 .ndo_open = tun_net_open,
818 .ndo_stop = tun_net_close,
819 .ndo_start_xmit = tun_net_xmit,
820 .ndo_change_mtu = tun_net_change_mtu,
821 .ndo_fix_features = tun_net_fix_features,
822 .ndo_select_queue = tun_select_queue,
823#ifdef CONFIG_NET_POLL_CONTROLLER
824 .ndo_poll_controller = tun_poll_controller,
825#endif
826};
827
828static const struct net_device_ops tap_netdev_ops = {
829 .ndo_uninit = tun_net_uninit,
830 .ndo_open = tun_net_open,
831 .ndo_stop = tun_net_close,
832 .ndo_start_xmit = tun_net_xmit,
833 .ndo_change_mtu = tun_net_change_mtu,
834 .ndo_fix_features = tun_net_fix_features,
835 .ndo_set_rx_mode = tun_net_mclist,
836 .ndo_set_mac_address = eth_mac_addr,
837 .ndo_validate_addr = eth_validate_addr,
838 .ndo_select_queue = tun_select_queue,
839#ifdef CONFIG_NET_POLL_CONTROLLER
840 .ndo_poll_controller = tun_poll_controller,
841#endif
842};
843
844static int tun_flow_init(struct tun_struct *tun)
845{
846 int i;
847
96442e42
JW
848 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
849 INIT_HLIST_HEAD(&tun->flows[i]);
850
851 tun->ageing_time = TUN_FLOW_EXPIRE;
852 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
853 mod_timer(&tun->flow_gc_timer,
854 round_jiffies_up(jiffies + tun->ageing_time));
855
856 return 0;
857}
858
859static void tun_flow_uninit(struct tun_struct *tun)
860{
861 del_timer_sync(&tun->flow_gc_timer);
862 tun_flow_flush(tun);
863}
864
865/* Initialize net device. */
866static void tun_net_init(struct net_device *dev)
867{
868 struct tun_struct *tun = netdev_priv(dev);
869
870 switch (tun->flags & TUN_TYPE_MASK) {
871 case TUN_TUN_DEV:
872 dev->netdev_ops = &tun_netdev_ops;
873
874 /* Point-to-Point TUN Device */
875 dev->hard_header_len = 0;
876 dev->addr_len = 0;
877 dev->mtu = 1500;
878
879 /* Zero header length */
880 dev->type = ARPHRD_NONE;
881 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
882 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
883 break;
884
885 case TUN_TAP_DEV:
886 dev->netdev_ops = &tap_netdev_ops;
887 /* Ethernet TAP Device */
888 ether_setup(dev);
889 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
890 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
891
892 eth_hw_addr_random(dev);
893
894 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
895 break;
896 }
897}
898
899/* Character device part */
900
901/* Poll */
902static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
903{
904 struct tun_file *tfile = file->private_data;
905 struct tun_struct *tun = __tun_get(tfile);
906 struct sock *sk;
907 unsigned int mask = 0;
908
909 if (!tun)
910 return POLLERR;
911
912 sk = tfile->socket.sk;
913
914 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
915
916 poll_wait(file, &tfile->wq.wait, wait);
917
918 if (!skb_queue_empty(&sk->sk_receive_queue))
919 mask |= POLLIN | POLLRDNORM;
920
921 if (sock_writeable(sk) ||
922 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
923 sock_writeable(sk)))
924 mask |= POLLOUT | POLLWRNORM;
925
926 if (tun->dev->reg_state != NETREG_REGISTERED)
927 mask = POLLERR;
928
929 tun_put(tun);
930 return mask;
931}
932
933/* prepad is the amount to reserve at front. len is length after that.
934 * linear is a hint as to how much to copy (usually headers). */
935static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
936 size_t prepad, size_t len,
937 size_t linear, int noblock)
938{
939 struct sock *sk = tfile->socket.sk;
940 struct sk_buff *skb;
941 int err;
942
943 /* Under a page? Don't bother with paged skb. */
944 if (prepad + len < PAGE_SIZE || !linear)
945 linear = len;
946
947 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
948 &err);
f42157cb 949 if (!skb)
950 return ERR_PTR(err);
951
952 skb_reserve(skb, prepad);
953 skb_put(skb, linear);
954 skb->data_len = len - linear;
955 skb->len += len - linear;
956
957 return skb;
958}
959
960/* set skb frags from iovec, this can move to core network code for reuse */
961static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
962 int offset, size_t count)
963{
964 int len = iov_length(from, count) - offset;
965 int copy = skb_headlen(skb);
966 int size, offset1 = 0;
967 int i = 0;
968
969 /* Skip over from offset */
970 while (count && (offset >= from->iov_len)) {
971 offset -= from->iov_len;
972 ++from;
973 --count;
974 }
975
976 /* copy up to skb headlen */
977 while (count && (copy > 0)) {
978 size = min_t(unsigned int, copy, from->iov_len - offset);
979 if (copy_from_user(skb->data + offset1, from->iov_base + offset,
980 size))
981 return -EFAULT;
982 if (copy > size) {
983 ++from;
984 --count;
985 offset = 0;
986 } else
987 offset += size;
988 copy -= size;
989 offset1 += size;
990 }
991
992 if (len == offset1)
993 return 0;
994
995 while (count--) {
996 struct page *page[MAX_SKB_FRAGS];
997 int num_pages;
998 unsigned long base;
999 unsigned long truesize;
1000
1001 len = from->iov_len - offset;
1002 if (!len) {
1003 offset = 0;
1004 ++from;
1005 continue;
1006 }
1007 base = (unsigned long)from->iov_base + offset;
1008 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1009 if (i + size > MAX_SKB_FRAGS)
1010 return -EMSGSIZE;
1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
1012 if (num_pages != size) {
1013 int j;
1014
1015 for (j = 0; j < num_pages; j++)
1016 put_page(page[i + j]);
1017 return -EFAULT;
1018 }
1019 truesize = size * PAGE_SIZE;
1020 skb->data_len += len;
1021 skb->len += len;
1022 skb->truesize += truesize;
1023 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
1024 while (len) {
1025 int off = base & ~PAGE_MASK;
1026 int size = min_t(int, len, PAGE_SIZE - off);
1027 __skb_fill_page_desc(skb, i, page[i], off, size);
1028 skb_shinfo(skb)->nr_frags++;
1029 /* increase sk_wmem_alloc */
1030 base += size;
1031 len -= size;
1032 i++;
1033 }
1034 offset = 0;
1035 ++from;
1036 }
1037 return 0;
1038}
1039
1040static unsigned long iov_pages(const struct iovec *iv, int offset,
1041 unsigned long nr_segs)
1042{
1043 unsigned long seg, base;
1044 int pages = 0, len, size;
1045
1046 while (nr_segs && (offset >= iv->iov_len)) {
1047 offset -= iv->iov_len;
1048 ++iv;
1049 --nr_segs;
1050 }
1051
1052 for (seg = 0; seg < nr_segs; seg++) {
1053 base = (unsigned long)iv[seg].iov_base + offset;
1054 len = iv[seg].iov_len - offset;
1055 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1056 pages += size;
1057 offset = 0;
1058 }
1059
1060 return pages;
1061}
1062
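/* Transmit path from userspace: the buffer starts with an optional
 * struct tun_pi (unless IFF_NO_PI), then an optional struct virtio_net_hdr
 * of tun->vnet_hdr_sz bytes (if IFF_VNET_HDR), followed by the packet
 * itself. When msg_control carries a ubuf_info and the iovec fits in
 * MAX_SKB_FRAGS pages, only the header portion is copied and the payload
 * pages are used zerocopy.
 */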
1063/* Get packet from user space buffer */
1064static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1065 void *msg_control, const struct iovec *iv,
1066 size_t total_len, size_t count, int noblock)
1067{
1068 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1069 struct sk_buff *skb;
1070 size_t len = total_len, align = NET_SKB_PAD, linear;
1071 struct virtio_net_hdr gso = { 0 };
1072 int offset = 0;
1073 int copylen;
1074 bool zerocopy = false;
1075 int err;
1076 u32 rxhash;
1077
1078 if (!(tun->flags & TUN_NO_PI)) {
1079 if (len < sizeof(pi))
1080 return -EINVAL;
1081 len -= sizeof(pi);
1082
1083 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
1084 return -EFAULT;
1085 offset += sizeof(pi);
1086 }
1087
1088 if (tun->flags & TUN_VNET_HDR) {
1089 if (len < tun->vnet_hdr_sz)
1090 return -EINVAL;
1091 len -= tun->vnet_hdr_sz;
1092
1093 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
1094 return -EFAULT;
1095
1096 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1097 gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
1098 gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
1099
1100 if (gso.hdr_len > len)
1101 return -EINVAL;
1102 offset += tun->vnet_hdr_sz;
1103 }
1104
1105 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
1106 align += NET_IP_ALIGN;
1107 if (unlikely(len < ETH_HLEN ||
1108 (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
1109 return -EINVAL;
1110 }
1111
1112 if (msg_control) {
1113 /* There are 256 bytes to be copied in skb, so there is
1114 * enough room for skb expand head in case it is used.
1115 * The rest of the buffer is mapped from userspace.
1116 */
1117 copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1118 linear = copylen;
1119 if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1120 zerocopy = true;
1121 }
1122
1123 if (!zerocopy) {
1124 copylen = len;
1125 linear = gso.hdr_len;
1126 }
1127
1128 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1129 if (IS_ERR(skb)) {
1130 if (PTR_ERR(skb) != -EAGAIN)
1131 tun->dev->stats.rx_dropped++;
1132 return PTR_ERR(skb);
1133 }
1134
1135 if (zerocopy)
1136 err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1137 else {
1138 err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1139 if (!err && msg_control) {
1140 struct ubuf_info *uarg = msg_control;
1141 uarg->callback(uarg, false);
1142 }
1143 }
1144
1145 if (err) {
1146 tun->dev->stats.rx_dropped++;
1147 kfree_skb(skb);
1148 return -EFAULT;
1149 }
1150
1151 if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1152 if (!skb_partial_csum_set(skb, gso.csum_start,
1153 gso.csum_offset)) {
1154 tun->dev->stats.rx_frame_errors++;
1155 kfree_skb(skb);
1156 return -EINVAL;
1157 }
1158 }
1159
1160 switch (tun->flags & TUN_TYPE_MASK) {
1161 case TUN_TUN_DEV:
1162 if (tun->flags & TUN_NO_PI) {
1163 switch (skb->data[0] & 0xf0) {
1164 case 0x40:
1165 pi.proto = htons(ETH_P_IP);
1166 break;
1167 case 0x60:
1168 pi.proto = htons(ETH_P_IPV6);
1169 break;
1170 default:
1171 tun->dev->stats.rx_dropped++;
1172 kfree_skb(skb);
1173 return -EINVAL;
1174 }
1175 }
1176
1177 skb_reset_mac_header(skb);
1178 skb->protocol = pi.proto;
1179 skb->dev = tun->dev;
1180 break;
1181 case TUN_TAP_DEV:
1182 skb->protocol = eth_type_trans(skb, tun->dev);
1183 break;
1184 }
1185
1186 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1187 pr_debug("GSO!\n");
1188 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1189 case VIRTIO_NET_HDR_GSO_TCPV4:
1190 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1191 break;
1192 case VIRTIO_NET_HDR_GSO_TCPV6:
1193 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1194 break;
1195 case VIRTIO_NET_HDR_GSO_UDP:
1196 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1197 break;
1198 default:
1199 tun->dev->stats.rx_frame_errors++;
1200 kfree_skb(skb);
1201 return -EINVAL;
1202 }
1203
1204 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1205 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1206
1207 skb_shinfo(skb)->gso_size = gso.gso_size;
1208 if (skb_shinfo(skb)->gso_size == 0) {
1209 tun->dev->stats.rx_frame_errors++;
1210 kfree_skb(skb);
1211 return -EINVAL;
1212 }
1213
1214 /* Header must be checked, and gso_segs computed. */
1215 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1216 skb_shinfo(skb)->gso_segs = 0;
1217 }
6aa20a22 1218
0690899b
MT
1219 /* copy skb_ubuf_info for callback when skb has no error */
1220 if (zerocopy) {
1221 skb_shinfo(skb)->destructor_arg = msg_control;
1222 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1223 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1224 }
1225
1226 skb_reset_network_header(skb);
1227 skb_probe_transport_header(skb, 0);
1228
1229 rxhash = skb_get_rxhash(skb);
1230 netif_rx_ni(skb);
1231
1232 tun->dev->stats.rx_packets++;
1233 tun->dev->stats.rx_bytes += len;
1234
1235 tun_flow_update(tun, rxhash, tfile);
1236 return total_len;
1237}
1238
1239static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
1240 unsigned long count, loff_t pos)
1241{
1242 struct file *file = iocb->ki_filp;
1243 struct tun_struct *tun = tun_get(file);
1244 struct tun_file *tfile = file->private_data;
1245 ssize_t result;
1246
1247 if (!tun)
1248 return -EBADFD;
1249
1250 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
1251
1252 result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
1253 count, file->f_flags & O_NONBLOCK);
1254
1255 tun_put(tun);
1256 return result;
1257}
1258
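/* Receive path to userspace: reads are prefixed with struct tun_pi (unless
 * IFF_NO_PI) and, with IFF_VNET_HDR, a virtio_net_hdr describing GSO and
 * checksum state; TUN_PKT_STRIP is set in tun_pi.flags when the supplied
 * buffer is too small and the packet is truncated.
 */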
1259/* Put packet to the user space buffer */
1260static ssize_t tun_put_user(struct tun_struct *tun,
1261 struct tun_file *tfile,
1262 struct sk_buff *skb,
1263 const struct iovec *iv, int len)
1264{
1265 struct tun_pi pi = { 0, skb->protocol };
1266 ssize_t total = 0;
1267
1268 if (!(tun->flags & TUN_NO_PI)) {
1269 if ((len -= sizeof(pi)) < 0)
1270 return -EINVAL;
1271
1272 if (len < skb->len) {
1273 /* Packet will be stripped */
1274 pi.flags |= TUN_PKT_STRIP;
1275 }
1276
1277 if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
1278 return -EFAULT;
1279 total += sizeof(pi);
1280 }
1281
1282 if (tun->flags & TUN_VNET_HDR) {
1283 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1284 if ((len -= tun->vnet_hdr_sz) < 0)
1285 return -EINVAL;
1286
1287 if (skb_is_gso(skb)) {
1288 struct skb_shared_info *sinfo = skb_shinfo(skb);
1289
1290 /* This is a hint as to how much should be linear. */
1291 gso.hdr_len = skb_headlen(skb);
1292 gso.gso_size = sinfo->gso_size;
1293 if (sinfo->gso_type & SKB_GSO_TCPV4)
1294 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1295 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1296 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1297 else if (sinfo->gso_type & SKB_GSO_UDP)
1298 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1299 else {
1300 pr_err("unexpected GSO type: "
1301 "0x%x, gso_size %d, hdr_len %d\n",
1302 sinfo->gso_type, gso.gso_size,
1303 gso.hdr_len);
1304 print_hex_dump(KERN_ERR, "tun: ",
1305 DUMP_PREFIX_NONE,
1306 16, 1, skb->head,
1307 min((int)gso.hdr_len, 64), true);
1308 WARN_ON_ONCE(1);
1309 return -EINVAL;
1310 }
1311 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1312 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1313 } else
1314 gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1315
1316 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1317 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1318 gso.csum_start = skb_checksum_start_offset(skb);
1319 gso.csum_offset = skb->csum_offset;
1320 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1321 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1322 } /* else everything is zero */
1323
1324 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1325 sizeof(gso))))
1326 return -EFAULT;
1327 total += tun->vnet_hdr_sz;
1328 }
1329
1330 len = min_t(int, skb->len, len);
1331
1332 skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
1333 total += skb->len;
1334
1335 tun->dev->stats.tx_packets++;
1336 tun->dev->stats.tx_bytes += len;
1337
1338 return total;
1339}
1340
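/* tun_do_read() dequeues one packet per call from the queue's
 * sk_receive_queue; unless noblock is set it sleeps on tfile->wq.wait until
 * a packet arrives, a signal is pending, or the device is unregistered.
 */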
1341static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1342 struct kiocb *iocb, const struct iovec *iv,
1343 ssize_t len, int noblock)
1344{
1345 DECLARE_WAITQUEUE(wait, current);
1346 struct sk_buff *skb;
1347 ssize_t ret = 0;
1348
1349 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1350
1351 if (unlikely(!noblock))
1352 add_wait_queue(&tfile->wq.wait, &wait);
1353 while (len) {
1354 current->state = TASK_INTERRUPTIBLE;
1355
1356 /* Read frames from the queue */
1357 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
1358 if (noblock) {
1359 ret = -EAGAIN;
1360 break;
1361 }
1362 if (signal_pending(current)) {
1363 ret = -ERESTARTSYS;
1364 break;
1365 }
1366 if (tun->dev->reg_state != NETREG_REGISTERED) {
1367 ret = -EIO;
1368 break;
1369 }
1370
1371 /* Nothing to read, let's sleep */
1372 schedule();
1373 continue;
1374 }
1375
1376 ret = tun_put_user(tun, tfile, skb, iv, len);
1377 kfree_skb(skb);
1378 break;
1379 }
1380
1381 current->state = TASK_RUNNING;
1382 if (unlikely(!noblock))
1383 remove_wait_queue(&tfile->wq.wait, &wait);
1384
1385 return ret;
1386}
1387
1388static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1389 unsigned long count, loff_t pos)
1390{
1391 struct file *file = iocb->ki_filp;
1392 struct tun_file *tfile = file->private_data;
1393 struct tun_struct *tun = __tun_get(tfile);
1394 ssize_t len, ret;
1395
1396 if (!tun)
1397 return -EBADFD;
1398 len = iov_length(iv, count);
1399 if (len < 0) {
1400 ret = -EINVAL;
1401 goto out;
1402 }
1403
1404 ret = tun_do_read(tun, tfile, iocb, iv, len,
1405 file->f_flags & O_NONBLOCK);
1406 ret = min_t(ssize_t, ret, len);
1407out:
1408 tun_put(tun);
1409 return ret;
1410}
1411
1412static void tun_free_netdev(struct net_device *dev)
1413{
1414 struct tun_struct *tun = netdev_priv(dev);
1415
1416 BUG_ON(!(list_empty(&tun->disabled)));
1417 tun_flow_uninit(tun);
1418 security_tun_dev_free_security(tun->security);
1419 free_netdev(dev);
1420}
1421
1422static void tun_setup(struct net_device *dev)
1423{
1424 struct tun_struct *tun = netdev_priv(dev);
1425
1426 tun->owner = INVALID_UID;
1427 tun->group = INVALID_GID;
1428
1429 dev->ethtool_ops = &tun_ethtool_ops;
1430 dev->destructor = tun_free_netdev;
1431}
1432
1433/* Trivial set of netlink ops to allow deleting tun or tap
1434 * device with netlink.
1435 */
1436static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1437{
1438 return -EINVAL;
1439}
1440
1441static struct rtnl_link_ops tun_link_ops __read_mostly = {
1442 .kind = DRV_NAME,
1443 .priv_size = sizeof(struct tun_struct),
1444 .setup = tun_setup,
1445 .validate = tun_validate,
1446};
1447
1448static void tun_sock_write_space(struct sock *sk)
1449{
1450 struct tun_file *tfile;
1451 wait_queue_head_t *wqueue;
1452
1453 if (!sock_writeable(sk))
1454 return;
1455
1456 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
1457 return;
1458
1459 wqueue = sk_sleep(sk);
1460 if (wqueue && waitqueue_active(wqueue))
1461 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1462 POLLWRNORM | POLLWRBAND);
1463
1464 tfile = container_of(sk, struct tun_file, sk);
1465 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1466}
1467
1468static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1469 struct msghdr *m, size_t total_len)
1470{
1471 int ret;
1472 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1473 struct tun_struct *tun = __tun_get(tfile);
1474
1475 if (!tun)
1476 return -EBADFD;
1477 ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1478 m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1479 tun_put(tun);
1480 return ret;
1481}
1482
1483
1484static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1485 struct msghdr *m, size_t total_len,
1486 int flags)
1487{
1488 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1489 struct tun_struct *tun = __tun_get(tfile);
1490 int ret;
1491
1492 if (!tun)
1493 return -EBADFD;
1494
1495 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
1496 ret = -EINVAL;
1497 goto out;
1498 }
1499 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1500 flags & MSG_DONTWAIT);
1501 if (ret > total_len) {
1502 m->msg_flags |= MSG_TRUNC;
1503 ret = flags & MSG_TRUNC ? ret : total_len;
1504 }
1505out:
1506 tun_put(tun);
1507 return ret;
1508}
1509
1510static int tun_release(struct socket *sock)
1511{
1512 if (sock->sk)
1513 sock_put(sock->sk);
1514 return 0;
1515}
1516
1517/* Ops structure to mimic raw sockets with tun */
1518static const struct proto_ops tun_socket_ops = {
1519 .sendmsg = tun_sendmsg,
1520 .recvmsg = tun_recvmsg,
1521 .release = tun_release,
1522};
1523
1524static struct proto tun_proto = {
1525 .name = "tun",
1526 .owner = THIS_MODULE,
1527 .obj_size = sizeof(struct tun_file),
1528};
1529
1530static int tun_flags(struct tun_struct *tun)
1531{
1532 int flags = 0;
1533
1534 if (tun->flags & TUN_TUN_DEV)
1535 flags |= IFF_TUN;
1536 else
1537 flags |= IFF_TAP;
1538
1539 if (tun->flags & TUN_NO_PI)
1540 flags |= IFF_NO_PI;
1541
1542 /* This flag has no real effect. We track the value for backwards
1543 * compatibility.
1544 */
1545 if (tun->flags & TUN_ONE_QUEUE)
1546 flags |= IFF_ONE_QUEUE;
1547
1548 if (tun->flags & TUN_VNET_HDR)
1549 flags |= IFF_VNET_HDR;
1550
1551 if (tun->flags & TUN_TAP_MQ)
1552 flags |= IFF_MULTI_QUEUE;
1553
1554 return flags;
1555}
1556
1557static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1558 char *buf)
1559{
1560 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1561 return sprintf(buf, "0x%x\n", tun_flags(tun));
1562}
1563
1564static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1565 char *buf)
1566{
1567 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1568 return uid_valid(tun->owner)?
1569 sprintf(buf, "%u\n",
1570 from_kuid_munged(current_user_ns(), tun->owner)):
1571 sprintf(buf, "-1\n");
1572}
1573
1574static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1575 char *buf)
1576{
1577 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1578 return gid_valid(tun->group) ?
1579 sprintf(buf, "%u\n",
1580 from_kgid_munged(current_user_ns(), tun->group)):
1581 sprintf(buf, "-1\n");
1582}
1583
1584static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1585static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1586static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1587
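/* Typical userspace usage of TUNSETIFF (illustrative sketch, not part of
 * the driver):
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
 *		perror("TUNSETIFF");
 *	// ifr.ifr_name now holds the allocated device name
 *
 * An empty name lets the kernel pick "tun%d"/"tap%d"; a persistent device
 * can then be requested with ioctl(fd, TUNSETPERSIST, 1).
 */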
1588static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1589{
1590 struct tun_struct *tun;
1591 struct tun_file *tfile = file->private_data;
1592 struct net_device *dev;
1593 int err;
1594
1595 if (tfile->detached)
1596 return -EINVAL;
1597
1598 dev = __dev_get_by_name(net, ifr->ifr_name);
1599 if (dev) {
1600 if (ifr->ifr_flags & IFF_TUN_EXCL)
1601 return -EBUSY;
1602 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1603 tun = netdev_priv(dev);
1604 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1605 tun = netdev_priv(dev);
1606 else
1607 return -EINVAL;
1608
1609 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1610 !!(tun->flags & TUN_TAP_MQ))
1611 return -EINVAL;
1612
1613 if (tun_not_capable(tun))
1614 return -EPERM;
1615 err = security_tun_dev_open(tun->security);
1616 if (err < 0)
1617 return err;
1618
1619 err = tun_attach(tun, file);
1620 if (err < 0)
1621 return err;
1622
1623 if (tun->flags & TUN_TAP_MQ &&
1624 (tun->numqueues + tun->numdisabled > 1)) {
1625 /* One or more queues have already been attached, no need
1626 * to initialize the device again.
1627 */
1628 return 0;
1629 }
1630 }
1631 else {
1632 char *name;
1633 unsigned long flags = 0;
1634 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1635 MAX_TAP_QUEUES : 1;
1636
1637 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1638 return -EPERM;
1639 err = security_tun_dev_create();
1640 if (err < 0)
1641 return err;
1642
1643 /* Set dev type */
1644 if (ifr->ifr_flags & IFF_TUN) {
1645 /* TUN device */
1646 flags |= TUN_TUN_DEV;
1647 name = "tun%d";
1648 } else if (ifr->ifr_flags & IFF_TAP) {
1649 /* TAP device */
1650 flags |= TUN_TAP_DEV;
1651 name = "tap%d";
1652 } else
1653 return -EINVAL;
1654
1655 if (*ifr->ifr_name)
1656 name = ifr->ifr_name;
1657
1658 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1659 tun_setup, queues, queues);
1660
1661 if (!dev)
1662 return -ENOMEM;
1663
1664 dev_net_set(dev, net);
1665 dev->rtnl_link_ops = &tun_link_ops;
1666
1667 tun = netdev_priv(dev);
1668 tun->dev = dev;
1669 tun->flags = flags;
1670 tun->txflt.count = 0;
1671 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1672
1673 tun->filter_attached = false;
1674 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1675
1676 spin_lock_init(&tun->lock);
1677
1678 err = security_tun_dev_alloc_security(&tun->security);
1679 if (err < 0)
1680 goto err_free_dev;
1681
1682 tun_net_init(dev);
1683
1684 err = tun_flow_init(tun);
1685 if (err < 0)
1686 goto err_free_dev;
1687
1688 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1689 TUN_USER_FEATURES;
1690 dev->features = dev->hw_features;
1691 dev->vlan_features = dev->features;
1692
1693 INIT_LIST_HEAD(&tun->disabled);
1694 err = tun_attach(tun, file);
1695 if (err < 0)
1696 goto err_free_dev;
1697
1698 err = register_netdevice(tun->dev);
1699 if (err < 0)
1700 goto err_free_dev;
1701
1702 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1703 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1704 device_create_file(&tun->dev->dev, &dev_attr_group))
1705 pr_err("Failed to create tun sysfs files\n");
1706 }
1707
1708 netif_carrier_on(tun->dev);
1709
1710 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1711
1712 if (ifr->ifr_flags & IFF_NO_PI)
1713 tun->flags |= TUN_NO_PI;
1714 else
1715 tun->flags &= ~TUN_NO_PI;
1716
1717 /* This flag has no real effect. We track the value for backwards
1718 * compatibility.
1719 */
1720 if (ifr->ifr_flags & IFF_ONE_QUEUE)
1721 tun->flags |= TUN_ONE_QUEUE;
1722 else
1723 tun->flags &= ~TUN_ONE_QUEUE;
1724
1725 if (ifr->ifr_flags & IFF_VNET_HDR)
1726 tun->flags |= TUN_VNET_HDR;
1727 else
1728 tun->flags &= ~TUN_VNET_HDR;
1729
1730 if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1731 tun->flags |= TUN_TAP_MQ;
1732 else
1733 tun->flags &= ~TUN_TAP_MQ;
1734
1735 /* Make sure persistent devices do not get stuck in
1736 * xoff state.
1737 */
1738 if (netif_running(tun->dev))
1739 netif_tx_wake_all_queues(tun->dev);
1740
1741 strcpy(ifr->ifr_name, tun->dev->name);
1742 return 0;
1743
1744 err_free_dev:
1745 free_netdev(dev);
1746 return err;
1747}
1748
1749static void tun_get_iff(struct net *net, struct tun_struct *tun,
1750 struct ifreq *ifr)
1751{
1752 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1753
1754 strcpy(ifr->ifr_name, tun->dev->name);
1755
1756 ifr->ifr_flags = tun_flags(tun);
1757
1758}
1759
1760/* This is like a cut-down ethtool ops, except done via tun fd so no
1761 * privs required. */
1762static int set_offload(struct tun_struct *tun, unsigned long arg)
1763{
1764 netdev_features_t features = 0;
1765
1766 if (arg & TUN_F_CSUM) {
1767 features |= NETIF_F_HW_CSUM;
1768 arg &= ~TUN_F_CSUM;
1769
1770 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1771 if (arg & TUN_F_TSO_ECN) {
1772 features |= NETIF_F_TSO_ECN;
1773 arg &= ~TUN_F_TSO_ECN;
1774 }
1775 if (arg & TUN_F_TSO4)
1776 features |= NETIF_F_TSO;
1777 if (arg & TUN_F_TSO6)
1778 features |= NETIF_F_TSO6;
1779 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1780 }
1781
1782 if (arg & TUN_F_UFO) {
1783 features |= NETIF_F_UFO;
1784 arg &= ~TUN_F_UFO;
1785 }
1786 }
1787
1788 /* This gives the user a way to test for new features in future by
1789 * trying to set them. */
1790 if (arg)
1791 return -EINVAL;
1792
1793 tun->set_features = features;
1794 netdev_update_features(tun->dev);
1795
1796 return 0;
1797}
1798
1799static void tun_detach_filter(struct tun_struct *tun, int n)
1800{
1801 int i;
1802 struct tun_file *tfile;
1803
1804 for (i = 0; i < n; i++) {
1805 tfile = rtnl_dereference(tun->tfiles[i]);
1806 sk_detach_filter(tfile->socket.sk);
1807 }
1808
1809 tun->filter_attached = false;
1810}
1811
1812static int tun_attach_filter(struct tun_struct *tun)
1813{
1814 int i, ret = 0;
1815 struct tun_file *tfile;
1816
1817 for (i = 0; i < tun->numqueues; i++) {
1818 tfile = rtnl_dereference(tun->tfiles[i]);
1819 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1820 if (ret) {
1821 tun_detach_filter(tun, i);
1822 return ret;
1823 }
1824 }
1825
1826 tun->filter_attached = true;
1827 return ret;
1828}
1829
1830static void tun_set_sndbuf(struct tun_struct *tun)
1831{
1832 struct tun_file *tfile;
1833 int i;
1834
1835 for (i = 0; i < tun->numqueues; i++) {
1836 tfile = rtnl_dereference(tun->tfiles[i]);
1837 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1838 }
1839}
1840
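/* TUNSETQUEUE: IFF_ATTACH_QUEUE re-enables a queue that was previously
 * detached from a multiqueue device, IFF_DETACH_QUEUE parks it on
 * tun->disabled without closing the fd; attaching is gated by
 * security_tun_dev_attach_queue() and requires an IFF_MULTI_QUEUE device.
 */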
1841static int tun_set_queue(struct file *file, struct ifreq *ifr)
1842{
1843 struct tun_file *tfile = file->private_data;
1844 struct tun_struct *tun;
1845 int ret = 0;
1846
1847 rtnl_lock();
1848
1849 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1850 tun = tfile->detached;
1851 if (!tun) {
1852 ret = -EINVAL;
1853 goto unlock;
1854 }
1855 ret = security_tun_dev_attach_queue(tun->security);
1856 if (ret < 0)
1857 goto unlock;
1858 ret = tun_attach(tun, file);
1859 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1860 tun = rtnl_dereference(tfile->tun);
1861 if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
1862 ret = -EINVAL;
1863 else
1864 __tun_detach(tfile, false);
1865 } else
1866 ret = -EINVAL;
1867
1868unlock:
1869 rtnl_unlock();
1870 return ret;
1871}
1872
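/* Illustrative userspace sketch (assumption, not part of this driver):
 * pausing and resuming one queue of a multiqueue tap device.  The fd is
 * assumed to be attached already via TUNSETIFF with IFF_MULTI_QUEUE;
 * the first call detaches the queue, the second re-attaches it.
 *
 *        struct ifreq ifr;
 *
 *        memset(&ifr, 0, sizeof(ifr));
 *        ifr.ifr_flags = IFF_DETACH_QUEUE;
 *        ioctl(fd, TUNSETQUEUE, &ifr);
 *
 *        ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *        ioctl(fd, TUNSETQUEUE, &ifr);
 */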
1873static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1874 unsigned long arg, int ifreq_len)
1875{
1876 struct tun_file *tfile = file->private_data;
1877 struct tun_struct *tun;
1878 void __user* argp = (void __user*)arg;
1879 struct ifreq ifr;
1880 kuid_t owner;
1881 kgid_t group;
1882 int sndbuf;
1883 int vnet_hdr_sz;
1884 int ret;
1885
1886 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1887 if (copy_from_user(&ifr, argp, ifreq_len))
1888 return -EFAULT;
1889 } else {
1890 memset(&ifr, 0, sizeof(ifr));
1891 }
1892 if (cmd == TUNGETFEATURES) {
1893 /* Currently this just means: "what IFF flags are valid?".
1894 * This is needed because we never checked for invalid flags on
1895 * TUNSETIFF. */
1896 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1897 IFF_VNET_HDR | IFF_MULTI_QUEUE,
1898 (unsigned int __user*)argp);
1899 } else if (cmd == TUNSETQUEUE)
1900 return tun_set_queue(file, &ifr);
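/* Illustrative userspace sketch (assumption, not part of this driver):
 * probing which IFF_* flags this kernel accepts before issuing
 * TUNSETIFF, as the TUNGETFEATURES comment above suggests.
 *
 *        unsigned int features = 0;
 *        int use_mq;
 *
 *        if (ioctl(fd, TUNGETFEATURES, &features) < 0)
 *                features = 0;
 *        use_mq = !!(features & IFF_MULTI_QUEUE);
 */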
1901
1902 ret = 0;
1903 rtnl_lock();
1904
1905 tun = __tun_get(tfile);
1906 if (cmd == TUNSETIFF && !tun) {
1907 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1908
1909 ret = tun_set_iff(tfile->net, file, &ifr);
1910
1911 if (ret)
1912 goto unlock;
1913
1914 if (copy_to_user(argp, &ifr, ifreq_len))
1915 ret = -EFAULT;
1916 goto unlock;
1917 }
1918
1919 ret = -EBADFD;
1920 if (!tun)
1921 goto unlock;
1922
1923 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1924
1925 ret = 0;
1926 switch (cmd) {
1927 case TUNGETIFF:
1928 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1929
1930 if (copy_to_user(argp, &ifr, ifreq_len))
1931 ret = -EFAULT;
1932 break;
1933
1934 case TUNSETNOCSUM:
1935 /* Disable/Enable checksum */
1936
1937 /* [unimplemented] */
1938 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1939 arg ? "disabled" : "enabled");
1940 break;
1941
1942 case TUNSETPERSIST:
1943 /* Disable/Enable persist mode. Keep an extra reference to the
1944 * module to prevent the module from being unloaded while a
1945 * persistent device exists. */
1946 if (arg && !(tun->flags & TUN_PERSIST)) {
1947 tun->flags |= TUN_PERSIST;
1948 __module_get(THIS_MODULE);
1949 }
1950 if (!arg && (tun->flags & TUN_PERSIST)) {
1951 tun->flags &= ~TUN_PERSIST;
1952 module_put(THIS_MODULE);
1953 }
1954
1955 tun_debug(KERN_INFO, tun, "persist %s\n",
1956 arg ? "enabled" : "disabled");
1957 break;
1958
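/* Illustrative userspace sketch (assumption): the argument is passed by
 * value, so making a device persistent and clearing that again is just:
 *
 *        ioctl(fd, TUNSETPERSIST, 1);
 *        ioctl(fd, TUNSETPERSIST, 0);
 */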
1959 case TUNSETOWNER:
1960 /* Set owner of the device */
1961 owner = make_kuid(current_user_ns(), arg);
1962 if (!uid_valid(owner)) {
1963 ret = -EINVAL;
1964 break;
1965 }
1966 tun->owner = owner;
1967 tun_debug(KERN_INFO, tun, "owner set to %u\n",
1968 from_kuid(&init_user_ns, tun->owner));
1969 break;
1970
1971 case TUNSETGROUP:
1972 /* Set group of the device */
1973 group = make_kgid(current_user_ns(), arg);
1974 if (!gid_valid(group)) {
1975 ret = -EINVAL;
1976 break;
1977 }
1978 tun->group = group;
1979 tun_debug(KERN_INFO, tun, "group set to %u\n",
1980 from_kgid(&init_user_ns, tun->group));
1981 break;
1982
1983 case TUNSETLINK:
1984 /* Only allow setting the type when the interface is down */
1985 if (tun->dev->flags & IFF_UP) {
1986 tun_debug(KERN_INFO, tun,
1987 "Linktype set failed because interface is up\n");
1988 ret = -EBUSY;
1989 } else {
1990 tun->dev->type = (int) arg;
1991 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1992 tun->dev->type);
1993 ret = 0;
1994 }
1995 break;
1996
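/* Illustrative userspace sketch (assumption): changing the reported
 * link type, which only succeeds while the interface is down:
 *
 *        ioctl(fd, TUNSETLINK, ARPHRD_PPP);
 *
 * With the interface up this fails with EBUSY, as implemented above.
 */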
1997#ifdef TUN_DEBUG
1998 case TUNSETDEBUG:
1999 tun->debug = arg;
2000 break;
2001#endif
2002 case TUNSETOFFLOAD:
2003 ret = set_offload(tun, arg);
2004 break;
2005
2006 case TUNSETTXFILTER:
2007 /* Can be set only for TAPs */
2008 ret = -EINVAL;
2009 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2010 break;
2011 ret = update_filter(&tun->txflt, (void __user *)arg);
2012 break;
2013
2014 case SIOCGIFHWADDR:
2015 /* Get hw address */
2016 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2017 ifr.ifr_hwaddr.sa_family = tun->dev->type;
2018 if (copy_to_user(argp, &ifr, ifreq_len))
2019 ret = -EFAULT;
2020 break;
2021
2022 case SIOCSIFHWADDR:
2023 /* Set hw address */
2024 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
2025 ifr.ifr_hwaddr.sa_data);
2026
2027 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2028 break;
2029
2030 case TUNGETSNDBUF:
2031 sndbuf = tfile->socket.sk->sk_sndbuf;
2032 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
2033 ret = -EFAULT;
2034 break;
2035
2036 case TUNSETSNDBUF:
2037 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
2038 ret = -EFAULT;
2039 break;
2040 }
2041
2042 tun->sndbuf = sndbuf;
2043 tun_set_sndbuf(tun);
2044 break;
2045
2046 case TUNGETVNETHDRSZ:
2047 vnet_hdr_sz = tun->vnet_hdr_sz;
2048 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2049 ret = -EFAULT;
2050 break;
2051
2052 case TUNSETVNETHDRSZ:
2053 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2054 ret = -EFAULT;
2055 break;
2056 }
2057 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2058 ret = -EINVAL;
2059 break;
2060 }
2061
2062 tun->vnet_hdr_sz = vnet_hdr_sz;
2063 break;
2064
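/* Illustrative userspace sketch (assumption): growing the virtio-net
 * header to the mergeable-rx-buffer layout; anything smaller than
 * sizeof(struct virtio_net_hdr) is rejected above.
 *
 *        int hdrsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *
 *        if (ioctl(fd, TUNSETVNETHDRSZ, &hdrsz) < 0)
 *                perror("TUNSETVNETHDRSZ");
 */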
2065 case TUNATTACHFILTER:
2066 /* Can be set only for TAPs */
2067 ret = -EINVAL;
2068 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2069 break;
2070 ret = -EFAULT;
2071 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2072 break;
2073
2074 ret = tun_attach_filter(tun);
2075 break;
2076
2077 case TUNDETACHFILTER:
2078 /* Can be set only for TAPs */
2079 ret = -EINVAL;
2080 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2081 break;
2082 ret = 0;
2083 tun_detach_filter(tun, tun->numqueues);
2084 break;
2085
2086 default:
2087 ret = -EINVAL;
2088 break;
2089 }
2090
2091unlock:
2092 rtnl_unlock();
2093 if (tun)
2094 tun_put(tun);
2095 return ret;
2096}
2097
2098static long tun_chr_ioctl(struct file *file,
2099 unsigned int cmd, unsigned long arg)
2100{
2101 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
2102}
2103
2104#ifdef CONFIG_COMPAT
2105static long tun_chr_compat_ioctl(struct file *file,
2106 unsigned int cmd, unsigned long arg)
2107{
2108 switch (cmd) {
2109 case TUNSETIFF:
2110 case TUNGETIFF:
2111 case TUNSETTXFILTER:
2112 case TUNGETSNDBUF:
2113 case TUNSETSNDBUF:
2114 case SIOCGIFHWADDR:
2115 case SIOCSIFHWADDR:
2116 arg = (unsigned long)compat_ptr(arg);
2117 break;
2118 default:
2119 arg = (compat_ulong_t)arg;
2120 break;
2121 }
2122
2123 /*
2124 * compat_ifreq is shorter than ifreq, so we must not access beyond
2125 * the end of that structure. All fields that are used in this
2126 * driver are compatible though, we don't need to convert the
2127 * contents.
2128 */
2129 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2130}
2131#endif /* CONFIG_COMPAT */
2132
2133static int tun_chr_fasync(int fd, struct file *file, int on)
2134{
2135 struct tun_file *tfile = file->private_data;
2136 int ret;
2137
2138 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2139 goto out;
2140
2141 if (on) {
2142 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2143 if (ret)
2144 goto out;
2145 tfile->flags |= TUN_FASYNC;
2146 } else
2147 tfile->flags &= ~TUN_FASYNC;
2148 ret = 0;
2149out:
2150 return ret;
2151}
2152
2153static int tun_chr_open(struct inode *inode, struct file * file)
2154{
2155 struct tun_file *tfile;
2156
2157 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2158
2159 tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
2160 &tun_proto);
2161 if (!tfile)
2162 return -ENOMEM;
2163 rcu_assign_pointer(tfile->tun, NULL);
2164 tfile->net = get_net(current->nsproxy->net_ns);
2165 tfile->flags = 0;
2166
2167 rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2168 init_waitqueue_head(&tfile->wq.wait);
2169
2170 tfile->socket.file = file;
2171 tfile->socket.ops = &tun_socket_ops;
2172
2173 sock_init_data(&tfile->socket, &tfile->sk);
2174 sk_change_net(&tfile->sk, tfile->net);
2175
2176 tfile->sk.sk_write_space = tun_sock_write_space;
2177 tfile->sk.sk_sndbuf = INT_MAX;
2178
2179 file->private_data = tfile;
2180 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2181 INIT_LIST_HEAD(&tfile->next);
2182
2183 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2184
2185 return 0;
2186}
2187
2188static int tun_chr_close(struct inode *inode, struct file *file)
2189{
2190 struct tun_file *tfile = file->private_data;
2191 struct net *net = tfile->net;
2192
2193 tun_detach(tfile, true);
2194 put_net(net);
2195
2196 return 0;
2197}
2198
2199static const struct file_operations tun_fops = {
2200 .owner = THIS_MODULE,
2201 .llseek = no_llseek,
2202 .read = do_sync_read,
2203 .aio_read = tun_chr_aio_read,
2204 .write = do_sync_write,
2205 .aio_write = tun_chr_aio_write,
2206 .poll = tun_chr_poll,
2207 .unlocked_ioctl = tun_chr_ioctl,
2208#ifdef CONFIG_COMPAT
2209 .compat_ioctl = tun_chr_compat_ioctl,
2210#endif
2211 .open = tun_chr_open,
2212 .release = tun_chr_close,
2213 .fasync = tun_chr_fasync
2214};
2215
2216static struct miscdevice tun_miscdev = {
2217 .minor = TUN_MINOR,
2218 .name = "tun",
2219 .nodename = "net/tun",
2220 .fops = &tun_fops,
2221};
2222
2223/* ethtool interface */
2224
2225static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2226{
2227 cmd->supported = 0;
2228 cmd->advertising = 0;
2229 ethtool_cmd_speed_set(cmd, SPEED_10);
2230 cmd->duplex = DUPLEX_FULL;
2231 cmd->port = PORT_TP;
2232 cmd->phy_address = 0;
2233 cmd->transceiver = XCVR_INTERNAL;
2234 cmd->autoneg = AUTONEG_DISABLE;
2235 cmd->maxtxpkt = 0;
2236 cmd->maxrxpkt = 0;
2237 return 0;
2238}
2239
2240static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2241{
2242 struct tun_struct *tun = netdev_priv(dev);
2243
2244 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2245 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2246
2247 switch (tun->flags & TUN_TYPE_MASK) {
2248 case TUN_TUN_DEV:
2249 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2250 break;
2251 case TUN_TAP_DEV:
2252 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2253 break;
2254 }
2255}
2256
2257static u32 tun_get_msglevel(struct net_device *dev)
2258{
2259#ifdef TUN_DEBUG
2260 struct tun_struct *tun = netdev_priv(dev);
2261 return tun->debug;
2262#else
2263 return -EOPNOTSUPP;
2264#endif
2265}
2266
2267static void tun_set_msglevel(struct net_device *dev, u32 value)
2268{
2269#ifdef TUN_DEBUG
2270 struct tun_struct *tun = netdev_priv(dev);
2271 tun->debug = value;
2272#endif
2273}
2274
2275static const struct ethtool_ops tun_ethtool_ops = {
2276 .get_settings = tun_get_settings,
2277 .get_drvinfo = tun_get_drvinfo,
2278 .get_msglevel = tun_get_msglevel,
2279 .set_msglevel = tun_set_msglevel,
2280 .get_link = ethtool_op_get_link,
2281};
2282
2283
2284static int __init tun_init(void)
2285{
2286 int ret = 0;
2287
2288 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2289 pr_info("%s\n", DRV_COPYRIGHT);
2290
2291 ret = rtnl_link_register(&tun_link_ops);
2292 if (ret) {
2293 pr_err("Can't register link_ops\n");
2294 goto err_linkops;
2295 }
2296
2297 ret = misc_register(&tun_miscdev);
2298 if (ret) {
2299 pr_err("Can't register misc device %d\n", TUN_MINOR);
2300 goto err_misc;
2301 }
2302 return 0;
2303err_misc:
2304 rtnl_link_unregister(&tun_link_ops);
2305err_linkops:
2306 return ret;
2307}
2308
2309static void tun_cleanup(void)
2310{
2311 misc_deregister(&tun_miscdev);
2312 rtnl_link_unregister(&tun_link_ops);
2313}
2314
2315/* Get an underlying socket object from tun file. Returns error unless file is
2316 * attached to a device. The returned object works like a packet socket, it
2317 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
2318 * holding a reference to the file for as long as the socket is in use. */
2319struct socket *tun_get_socket(struct file *file)
2320{
2321 struct tun_file *tfile;
2322 if (file->f_op != &tun_fops)
2323 return ERR_PTR(-EINVAL);
2324 tfile = file->private_data;
2325 if (!tfile)
2326 return ERR_PTR(-EBADFD);
2327 return &tfile->socket;
2328}
2329EXPORT_SYMBOL_GPL(tun_get_socket);
2330
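/* Illustrative in-kernel sketch (assumption, not part of this driver):
 * roughly how a consumer such as vhost-net turns a tun fd received from
 * userspace into its backing socket.  On success the file reference is
 * kept until the caller is done with the socket, as required above.
 *
 *        struct file *file = fget(fd);
 *        struct socket *sock;
 *
 *        if (!file)
 *                return ERR_PTR(-EBADF);
 *        sock = tun_get_socket(file);
 *        if (IS_ERR(sock))
 *                fput(file);
 *        return sock;
 */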
2331module_init(tun_init);
2332module_exit(tun_cleanup);
2333MODULE_DESCRIPTION(DRV_DESCRIPTION);
2334MODULE_AUTHOR(DRV_COPYRIGHT);
2335MODULE_LICENSE("GPL");
2336MODULE_ALIAS_MISCDEV(TUN_MINOR);
2337MODULE_ALIAS("devname:net/tun");