drivers/net/macvlan.c
/*
 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * The code this is based on carried the following copyright notice:
 * ---
 * (C) Copyright 2001-2006
 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
 * Re-worked by Ben Greear <greearb@candelatech.com>
 * ---
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>

#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)

struct macvlan_port {
        struct net_device *dev;
        struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
        struct list_head vlans;
        struct rcu_head rcu;
        bool passthru;
        int count;
};

static void macvlan_port_destroy(struct net_device *dev);

static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
{
        return rcu_dereference(dev->rx_handler_data);
}

static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
{
        return rtnl_dereference(dev->rx_handler_data);
}

#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)

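/* Look up a macvlan on this port by MAC address; the table is hashed
 * on the last octet of the address.
 */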
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
                                               const unsigned char *addr)
{
        struct macvlan_dev *vlan;

        hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
                if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
                        return vlan;
        }
        return NULL;
}

static void macvlan_hash_add(struct macvlan_dev *vlan)
{
        struct macvlan_port *port = vlan->port;
        const unsigned char *addr = vlan->dev->dev_addr;

        hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]);
}

static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
{
        hlist_del_rcu(&vlan->hlist);
        if (sync)
                synchronize_rcu();
}

static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
                                     const unsigned char *addr)
{
        macvlan_hash_del(vlan, true);
        /* Now that we are unhashed it is safe to change the device
         * address without confusing packet delivery.
         */
        memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
        macvlan_hash_add(vlan);
}

static int macvlan_addr_busy(const struct macvlan_port *port,
                             const unsigned char *addr)
{
        /* Test to see if the specified address is currently in use
         * by the underlying device or another macvlan.
         */
        if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
                return 1;

        if (macvlan_hash_lookup(port, addr))
                return 1;

        return 0;
}

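/* Deliver one copy of a multicast/broadcast frame to a single macvlan.
 * 'local' is true when the frame was sent by another macvlan on the same
 * bridge, in which case it is handed to the forward() path rather than
 * the receive() path.
 */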
static int macvlan_broadcast_one(struct sk_buff *skb,
                                 const struct macvlan_dev *vlan,
                                 const struct ethhdr *eth, bool local)
{
        struct net_device *dev = vlan->dev;

        if (!skb)
                return NET_RX_DROP;

        if (local)
                return vlan->forward(dev, skb);

        skb->dev = dev;
        if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        return vlan->receive(skb);
}

static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
{
        return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
}

static unsigned int mc_hash(const struct macvlan_dev *vlan,
                            const unsigned char *addr)
{
        u32 val = __get_unaligned_cpu32(addr + 2);

        val ^= macvlan_hash_mix(vlan);
        return hash_32(val, MACVLAN_MC_FILTER_BITS);
}

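/* Flood a multicast/broadcast frame to every macvlan on the port whose
 * mode is included in 'mode' and whose multicast filter accepts the
 * destination address, skipping the source device.
 */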
static void macvlan_broadcast(struct sk_buff *skb,
                              const struct macvlan_port *port,
                              struct net_device *src,
                              enum macvlan_mode mode)
{
        const struct ethhdr *eth = eth_hdr(skb);
        const struct macvlan_dev *vlan;
        struct sk_buff *nskb;
        unsigned int i;
        int err;
        unsigned int hash;

        if (skb->protocol == htons(ETH_P_PAUSE))
                return;

        for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
                hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
                        if (vlan->dev == src || !(vlan->mode & mode))
                                continue;

                        hash = mc_hash(vlan, eth->h_dest);
                        if (!test_bit(hash, vlan->mc_filter))
                                continue;
                        nskb = skb_clone(skb, GFP_ATOMIC);
                        err = macvlan_broadcast_one(nskb, vlan, eth,
                                        mode == MACVLAN_MODE_BRIDGE);
                        macvlan_count_rx(vlan, skb->len + ETH_HLEN,
                                         err == NET_RX_SUCCESS, 1);
                }
        }
}

/* called under rcu_read_lock() from netif_receive_skb */
static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
{
        struct macvlan_port *port;
        struct sk_buff *skb = *pskb;
        const struct ethhdr *eth = eth_hdr(skb);
        const struct macvlan_dev *vlan;
        const struct macvlan_dev *src;
        struct net_device *dev;
        unsigned int len = 0;
        int ret = NET_RX_DROP;

        port = macvlan_port_get_rcu(skb->dev);
        if (is_multicast_ether_addr(eth->h_dest)) {
                skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
                if (!skb)
                        return RX_HANDLER_CONSUMED;
                eth = eth_hdr(skb);
                src = macvlan_hash_lookup(port, eth->h_source);
                if (!src)
                        /* frame comes from an external address */
                        macvlan_broadcast(skb, port, NULL,
                                          MACVLAN_MODE_PRIVATE |
                                          MACVLAN_MODE_VEPA |
                                          MACVLAN_MODE_PASSTHRU |
                                          MACVLAN_MODE_BRIDGE);
                else if (src->mode == MACVLAN_MODE_VEPA)
                        /* flood to everyone except source */
                        macvlan_broadcast(skb, port, src->dev,
                                          MACVLAN_MODE_VEPA |
                                          MACVLAN_MODE_BRIDGE);
                else if (src->mode == MACVLAN_MODE_BRIDGE)
                        /*
                         * flood only to VEPA ports, bridge ports
                         * already saw the frame on the way out.
                         */
                        macvlan_broadcast(skb, port, src->dev,
                                          MACVLAN_MODE_VEPA);
                else {
                        /* forward to original port. */
                        vlan = src;
                        ret = macvlan_broadcast_one(skb, vlan, eth, 0);
                        goto out;
                }

                return RX_HANDLER_PASS;
        }

        if (port->passthru)
                vlan = list_first_or_null_rcu(&port->vlans,
                                              struct macvlan_dev, list);
        else
                vlan = macvlan_hash_lookup(port, eth->h_dest);
        if (vlan == NULL)
                return RX_HANDLER_PASS;

        dev = vlan->dev;
        if (unlikely(!(dev->flags & IFF_UP))) {
                kfree_skb(skb);
                return RX_HANDLER_CONSUMED;
        }
        len = skb->len + ETH_HLEN;
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto out;

        skb->dev = dev;
        skb->pkt_type = PACKET_HOST;

        ret = vlan->receive(skb);

out:
        macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
        return RX_HANDLER_CONSUMED;
}

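/* In bridge mode, unicast frames to another bridge-mode macvlan on the same
 * port are looped back through the lower device's receive path
 * (dev_forward_skb) so they never hit the wire; everything else is queued
 * on the lower device for transmission.
 */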
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        const struct macvlan_dev *vlan = netdev_priv(dev);
        const struct macvlan_port *port = vlan->port;
        const struct macvlan_dev *dest;

        if (vlan->mode == MACVLAN_MODE_BRIDGE) {
                const struct ethhdr *eth = (void *)skb->data;

                /* send to other bridge ports directly */
                if (is_multicast_ether_addr(eth->h_dest)) {
                        macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
                        goto xmit_world;
                }

                dest = macvlan_hash_lookup(port, eth->h_dest);
                if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
                        /* send to lowerdev first for its network taps */
                        dev_forward_skb(vlan->lowerdev, skb);

                        return NET_XMIT_SUCCESS;
                }
        }

xmit_world:
        skb->dev = vlan->lowerdev;
        return dev_queue_xmit(skb);
}

netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
                               struct net_device *dev)
{
        unsigned int len = skb->len;
        int ret;
        const struct macvlan_dev *vlan = netdev_priv(dev);

        ret = macvlan_queue_xmit(skb, dev);
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct macvlan_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(vlan->pcpu_stats->tx_dropped);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(macvlan_start_xmit);

static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
                               unsigned short type, const void *daddr,
                               const void *saddr, unsigned len)
{
        const struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;

        return dev_hard_header(skb, lowerdev, type, daddr,
                               saddr ? : dev->dev_addr, len);
}

static const struct header_ops macvlan_hard_header_ops = {
        .create         = macvlan_hard_header,
        .rebuild        = eth_rebuild_header,
        .parse          = eth_header_parse,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
};

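/* Bring the macvlan up: in passthru mode put the lower device into
 * promiscuous mode (unless NOPROMISC is set), otherwise program the
 * macvlan's MAC address (and ALLMULTI if requested) into the lower
 * device, then add the macvlan to the address hash.
 */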
static int macvlan_open(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
        int err;

        if (vlan->port->passthru) {
                if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
                        dev_set_promiscuity(lowerdev, 1);
                goto hash_add;
        }

        err = -EBUSY;
        if (macvlan_addr_busy(vlan->port, dev->dev_addr))
                goto out;

        err = dev_uc_add(lowerdev, dev->dev_addr);
        if (err < 0)
                goto out;
        if (dev->flags & IFF_ALLMULTI) {
                err = dev_set_allmulti(lowerdev, 1);
                if (err < 0)
                        goto del_unicast;
        }

hash_add:
        macvlan_hash_add(vlan);
        return 0;

del_unicast:
        dev_uc_del(lowerdev, dev->dev_addr);
out:
        return err;
}

static int macvlan_stop(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;

        dev_uc_unsync(lowerdev, dev);
        dev_mc_unsync(lowerdev, dev);

        if (vlan->port->passthru) {
                if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
                        dev_set_promiscuity(lowerdev, -1);
                goto hash_del;
        }

        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(lowerdev, -1);

        dev_uc_del(lowerdev, dev->dev_addr);

hash_del:
        macvlan_hash_del(vlan, !dev->dismantle);
        return 0;
}

static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
        struct sockaddr *addr = p;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (!(dev->flags & IFF_UP)) {
                /* Just copy in the new address */
                memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        } else {
                /* Rehash and update the device filters */
                if (macvlan_addr_busy(vlan->port, addr->sa_data))
                        return -EBUSY;

                err = dev_uc_add(lowerdev, addr->sa_data);
                if (err)
                        return err;

                dev_uc_del(lowerdev, dev->dev_addr);

                macvlan_hash_change_addr(vlan, addr->sa_data);
        }
        return 0;
}

static void macvlan_change_rx_flags(struct net_device *dev, int change)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;

        if (dev->flags & IFF_UP) {
                if (change & IFF_ALLMULTI)
                        dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
        }
}

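/* Rebuild the multicast filter bitmap from the device's multicast list
 * (or pass everything in promiscuous/allmulti mode) and sync the unicast
 * and multicast addresses down to the lower device.
 */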
static void macvlan_set_mac_lists(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
        } else {
                struct netdev_hw_addr *ha;
                DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);

                bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
                netdev_for_each_mc_addr(ha, dev) {
                        __set_bit(mc_hash(vlan, ha->addr), filter);
                }

                __set_bit(mc_hash(vlan, dev->broadcast), filter);

                bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
        }
        dev_uc_sync(vlan->lowerdev, dev);
        dev_mc_sync(vlan->lowerdev, dev);
}

static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

/*
 * macvlan network devices have devices nesting below them and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;

#define MACVLAN_FEATURES \
        (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
         NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

#define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
{
        lockdep_set_class(&txq->_xmit_lock,
                          &macvlan_netdev_xmit_lock_key);
}

static void macvlan_set_lockdep_class(struct net_device *dev)
{
        lockdep_set_class(&dev->addr_list_lock,
                          &macvlan_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}

static int macvlan_init(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        const struct net_device *lowerdev = vlan->lowerdev;

        dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
                     (lowerdev->state & MACVLAN_STATE_MASK);
        dev->features = lowerdev->features & MACVLAN_FEATURES;
        dev->features |= NETIF_F_LLTX;
        dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
        dev->gso_max_size = lowerdev->gso_max_size;
        dev->iflink = lowerdev->ifindex;
        dev->hard_header_len = lowerdev->hard_header_len;

        macvlan_set_lockdep_class(dev);

        vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
        if (!vlan->pcpu_stats)
                return -ENOMEM;

        return 0;
}

static void macvlan_uninit(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvlan_port *port = vlan->port;

        free_percpu(vlan->pcpu_stats);

        port->count -= 1;
        if (!port->count)
                macvlan_port_destroy(port->dev);
}

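/* Fold the per-CPU packet and byte counters into one rtnl_link_stats64
 * snapshot.
 */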
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
                                                         struct rtnl_link_stats64 *stats)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (vlan->pcpu_stats) {
                struct macvlan_pcpu_stats *p;
                u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
                u32 rx_errors = 0, tx_dropped = 0;
                unsigned int start;
                int i;

                for_each_possible_cpu(i) {
                        p = per_cpu_ptr(vlan->pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rx_packets = p->rx_packets;
                                rx_bytes = p->rx_bytes;
                                rx_multicast = p->rx_multicast;
                                tx_packets = p->tx_packets;
                                tx_bytes = p->tx_bytes;
                        } while (u64_stats_fetch_retry_bh(&p->syncp, start));

                        stats->rx_packets += rx_packets;
                        stats->rx_bytes += rx_bytes;
                        stats->multicast += rx_multicast;
                        stats->tx_packets += tx_packets;
                        stats->tx_bytes += tx_bytes;
                        /* rx_errors & tx_dropped are u32, updated
                         * without syncp protection.
                         */
                        rx_errors += p->rx_errors;
                        tx_dropped += p->tx_dropped;
                }
                stats->rx_errors = rx_errors;
                stats->rx_dropped = rx_errors;
                stats->tx_dropped = tx_dropped;
        }
        return stats;
}

static int macvlan_vlan_rx_add_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;

        return vlan_vid_add(lowerdev, proto, vid);
}

static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;

        vlan_vid_del(lowerdev, proto, vid);
        return 0;
}

static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
                           const unsigned char *addr,
                           u16 flags)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;

        if (!vlan->port->passthru)
                return -EOPNOTSUPP;

        if (is_unicast_ether_addr(addr))
                err = dev_uc_add_excl(dev, addr);
        else if (is_multicast_ether_addr(addr))
                err = dev_mc_add_excl(dev, addr);

        return err;
}

static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
                           const unsigned char *addr)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;

        if (!vlan->port->passthru)
                return -EOPNOTSUPP;

        if (is_unicast_ether_addr(addr))
                err = dev_uc_del(dev, addr);
        else if (is_multicast_ether_addr(addr))
                err = dev_mc_del(dev, addr);

        return err;
}

static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
                                        struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}

static int macvlan_ethtool_get_settings(struct net_device *dev,
                                        struct ethtool_cmd *cmd)
{
        const struct macvlan_dev *vlan = netdev_priv(dev);

        return __ethtool_get_settings(vlan->lowerdev, cmd);
}

static const struct ethtool_ops macvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_settings           = macvlan_ethtool_get_settings,
        .get_drvinfo            = macvlan_ethtool_get_drvinfo,
};

static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_init               = macvlan_init,
        .ndo_uninit             = macvlan_uninit,
        .ndo_open               = macvlan_open,
        .ndo_stop               = macvlan_stop,
        .ndo_start_xmit         = macvlan_start_xmit,
        .ndo_change_mtu         = macvlan_change_mtu,
        .ndo_change_rx_flags    = macvlan_change_rx_flags,
        .ndo_set_mac_address    = macvlan_set_mac_address,
        .ndo_set_rx_mode        = macvlan_set_mac_lists,
        .ndo_get_stats64        = macvlan_dev_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = macvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = macvlan_vlan_rx_kill_vid,
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
};

void macvlan_common_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT;
        dev->netdev_ops = &macvlan_netdev_ops;
        dev->destructor = free_netdev;
        dev->header_ops = &macvlan_hard_header_ops;
        dev->ethtool_ops = &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);

static void macvlan_setup(struct net_device *dev)
{
        macvlan_common_setup(dev);
        dev->tx_queue_len = 0;
}

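/* Set up the per-lower-device port structure and register the rx handler
 * that steers incoming frames to the macvlans stacked on top of it.
 */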
static int macvlan_port_create(struct net_device *dev)
{
        struct macvlan_port *port;
        unsigned int i;
        int err;

        if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
                return -EINVAL;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (port == NULL)
                return -ENOMEM;

        port->passthru = false;
        port->dev = dev;
        INIT_LIST_HEAD(&port->vlans);
        for (i = 0; i < MACVLAN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&port->vlan_hash[i]);

        err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
        if (err)
                kfree(port);
        else
                dev->priv_flags |= IFF_MACVLAN_PORT;
        return err;
}

static void macvlan_port_destroy(struct net_device *dev)
{
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);

        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
        kfree_rcu(port, rcu);
}

static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }

        if (data && data[IFLA_MACVLAN_FLAGS] &&
            nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
                return -EINVAL;

        if (data && data[IFLA_MACVLAN_MODE]) {
                switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
                case MACVLAN_MODE_PRIVATE:
                case MACVLAN_MODE_VEPA:
                case MACVLAN_MODE_BRIDGE:
                case MACVLAN_MODE_PASSTHRU:
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

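/* Common creation path: resolve the lower device, create the port on
 * first use and register the new macvlan.  The receive/forward hooks are
 * supplied by the caller (e.g. macvtap provides its own).
 */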
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[],
                           int (*receive)(struct sk_buff *skb),
                           int (*forward)(struct net_device *dev,
                                          struct sk_buff *skb))
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvlan_port *port;
        struct net_device *lowerdev;
        int err;

        if (!tb[IFLA_LINK])
                return -EINVAL;

        lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (lowerdev == NULL)
                return -ENODEV;

        /* When creating macvlans on top of other macvlans - use
         * the real device as the lowerdev.
         */
        if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
                struct macvlan_dev *lowervlan = netdev_priv(lowerdev);

                lowerdev = lowervlan->lowerdev;
        }

        if (!tb[IFLA_MTU])
                dev->mtu = lowerdev->mtu;
        else if (dev->mtu > lowerdev->mtu)
                return -EINVAL;

        if (!tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        if (!macvlan_port_exists(lowerdev)) {
                err = macvlan_port_create(lowerdev);
                if (err < 0)
                        return err;
        }
        port = macvlan_port_get_rtnl(lowerdev);

        /* Only 1 macvlan device can be created in passthru mode */
        if (port->passthru)
                return -EINVAL;

        vlan->lowerdev = lowerdev;
        vlan->dev = dev;
        vlan->port = port;
        vlan->receive = receive;
        vlan->forward = forward;

        vlan->mode = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
                vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);

        if (data && data[IFLA_MACVLAN_FLAGS])
                vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);

        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
                if (port->count)
                        return -EINVAL;
                port->passthru = true;
                memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
        }

        err = netdev_upper_dev_link(lowerdev, dev);
        if (err)
                goto destroy_port;

        port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto upper_dev_unlink;

        list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);

        return 0;

upper_dev_unlink:
        netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
        port->count -= 1;
        if (!port->count)
                macvlan_port_destroy(lowerdev);

        return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);

static int macvlan_newlink(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[])
{
        return macvlan_common_newlink(src_net, dev, tb, data,
                                      netif_rx,
                                      dev_forward_skb);
}

void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        list_del_rcu(&vlan->list);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);

static int macvlan_changelink(struct net_device *dev,
                              struct nlattr *tb[], struct nlattr *data[])
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (data && data[IFLA_MACVLAN_FLAGS]) {
                __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
                bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;

                if (vlan->port->passthru && promisc) {
                        int err;

                        if (flags & MACVLAN_FLAG_NOPROMISC)
                                err = dev_set_promiscuity(vlan->lowerdev, -1);
                        else
                                err = dev_set_promiscuity(vlan->lowerdev, 1);
                        if (err < 0)
                                return err;
                }
                vlan->flags = flags;
        }
        if (data && data[IFLA_MACVLAN_MODE])
                vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
        return 0;
}

static size_t macvlan_get_size(const struct net_device *dev)
{
        return (0
                + nla_total_size(4) /* IFLA_MACVLAN_MODE */
                + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
                );
}

static int macvlan_fill_info(struct sk_buff *skb,
                             const struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
                goto nla_put_failure;
        if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
        [IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
        [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
};

int macvlan_link_register(struct rtnl_link_ops *ops)
{
        /* common fields */
        ops->priv_size          = sizeof(struct macvlan_dev);
        ops->validate           = macvlan_validate;
        ops->maxtype            = IFLA_MACVLAN_MAX;
        ops->policy             = macvlan_policy;
        ops->changelink         = macvlan_changelink;
        ops->get_size           = macvlan_get_size;
        ops->fill_info          = macvlan_fill_info;

        return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(macvlan_link_register);

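/* rtnl_link_ops for the plain "macvlan" link type.  Instances are created
 * from userspace via rtnetlink, e.g. with iproute2:
 *   ip link add link eth0 name macvlan0 type macvlan mode bridge
 */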
static struct rtnl_link_ops macvlan_link_ops = {
        .kind           = "macvlan",
        .setup          = macvlan_setup,
        .newlink        = macvlan_newlink,
        .dellink        = macvlan_dellink,
};

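/* Propagate lower-device events (carrier changes, feature changes,
 * unregistration) to the macvlans stacked on top of it.
 */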
static int macvlan_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;
        struct macvlan_dev *vlan, *next;
        struct macvlan_port *port;
        LIST_HEAD(list_kill);

        if (!macvlan_port_exists(dev))
                return NOTIFY_DONE;

        port = macvlan_port_get_rtnl(dev);

        switch (event) {
        case NETDEV_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list)
                        netif_stacked_transfer_operstate(vlan->lowerdev,
                                                         vlan->dev);
                break;
        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list) {
                        vlan->dev->features = dev->features & MACVLAN_FEATURES;
                        vlan->dev->gso_max_size = dev->gso_max_size;
                        netdev_features_change(vlan->dev);
                }
                break;
        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                list_for_each_entry_safe(vlan, next, &port->vlans, list)
                        vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
                unregister_netdevice_many(&list_kill);
                break;
        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid the underlying device to change its type. */
                return NOTIFY_BAD;
        }
        return NOTIFY_DONE;
}

static struct notifier_block macvlan_notifier_block __read_mostly = {
        .notifier_call  = macvlan_device_event,
};

static int __init macvlan_init_module(void)
{
        int err;

        register_netdevice_notifier(&macvlan_notifier_block);

        err = macvlan_link_register(&macvlan_link_ops);
        if (err < 0)
                goto err1;
        return 0;
err1:
        unregister_netdevice_notifier(&macvlan_notifier_block);
        return err;
}

static void __exit macvlan_cleanup_module(void)
{
        rtnl_link_unregister(&macvlan_link_ops);
        unregister_netdevice_notifier(&macvlan_notifier_block);
}

module_init(macvlan_init_module);
module_exit(macvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Driver for MAC address based VLANs");
MODULE_ALIAS_RTNL_LINK("macvlan");