static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *stop_at;
int i, res;
bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_add_vid) {
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
- }
+ res = vlan_vid_add(slave->dev, vid);
+ if (res)
+ goto unwind;
}
res = bond_add_vlan(bond, vid);
}
return 0;
+
+unwind:
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ vlan_vid_del(slave->dev, vid);
+
+ return res;
}
/**
struct slave *slave;
int i, res;
- bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_kill_vid) {
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
- }
- }
+ bond_for_each_slave(bond, slave, i)
+ vlan_vid_del(slave->dev, vid);
res = bond_del_vlan(bond, vid);
if (res) {
static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
{
struct vlan_entry *vlan;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_add_vid))
- return;
+ int res;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ res = vlan_vid_add(slave_dev, vlan->vlan_id);
+ if (res)
+ pr_warning("%s: Failed to add vlan id %d to device %s\n",
+ bond->dev->name, vlan->vlan_id,
+ slave_dev->name);
+ }
}
static void bond_del_vlans_from_slave(struct bonding *bond,
struct net_device *slave_dev)
{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_kill_vid))
- return;
-
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id)
continue;
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+ vlan_vid_del(slave_dev, vlan->vlan_id);
}
}
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_add_vid)
- return ops->ndo_vlan_rx_add_vid(lowerdev, vid);
- return 0;
+ return vlan_vid_add(lowerdev, vid);
}
static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_kill_vid)
- return ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+ vlan_vid_del(lowerdev, vid);
return 0;
}
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
{
struct team *team = netdev_priv(dev);
struct team_port *port;
+ int err;
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list) {
- const struct net_device_ops *ops = port->dev->netdev_ops;
-
- if (ops->ndo_vlan_rx_add_vid)
- ops->ndo_vlan_rx_add_vid(port->dev, vid);
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = vlan_vid_add(port->dev, vid);
+ if (err)
+ goto unwind;
}
- rcu_read_unlock();
+ mutex_unlock(&team->lock);
return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ mutex_unlock(&team->lock);
+
+ return err;
}
static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
struct team_port *port;
rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list) {
- const struct net_device_ops *ops = port->dev->netdev_ops;
-
- if (ops->ndo_vlan_rx_kill_vid)
- ops->ndo_vlan_rx_kill_vid(port->dev, vid);
- }
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
rcu_read_unlock();
return 0;
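
As an aside, a minimal sketch of the rollback idiom the team conversion above relies on, shown on a made-up list (struct item, enable_item() and enable_all() are hypothetical, not part of the patch): iterate forward, and on failure continue from the failing entry back toward the head so only the entries that actually succeeded are undone.

/*
 * Illustrative sketch only, not part of the patch: generic
 * add-with-rollback over a kernel list.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/errno.h>

struct item {
	struct list_head list;
	bool enabled;
};

static int enable_item(struct item *it)
{
	if (it->enabled)
		return -EBUSY;	/* arbitrary failure, for illustration */
	it->enabled = true;
	return 0;
}

static int enable_all(struct list_head *head)
{
	struct item *it;
	int err;

	list_for_each_entry(it, head, list) {
		err = enable_item(it);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* walk back from the failing entry, undoing earlier successes */
	list_for_each_entry_continue_reverse(it, head, list)
		it->enabled = false;
	return err;
}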
extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+
#else
static inline struct net_device *
__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
{
return skb;
}
+
+static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+ return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+}
#endif
/**
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
- const struct net_device_ops *ops = real_dev->netdev_ops;
struct vlan_group *grp;
u16 vlan_id = vlan->vlan_id;
* HW accelerating devices or SW vlan input packet processing if
* VLAN is not 0 (leave it there for 802.1p).
*/
- if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
- ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+ if (vlan_id)
+ vlan_vid_del(real_dev, vlan_id);
grp->nr_vlans--;
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
- const struct net_device_ops *ops = real_dev->netdev_ops;
u16 vlan_id = vlan->vlan_id;
struct vlan_group *grp, *ngrp = NULL;
int err;
if (ngrp) {
rcu_assign_pointer(real_dev->vlgrp, ngrp);
}
- if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
- ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+ vlan_vid_add(real_dev, vlan_id);
return 0;
__vlan_device_event(dev, event);
if ((event == NETDEV_UP) &&
- (dev->features & NETIF_F_HW_VLAN_FILTER) &&
- dev->netdev_ops->ndo_vlan_rx_add_vid) {
+ (dev->features & NETIF_F_HW_VLAN_FILTER)) {
pr_info("adding VLAN 0 to HW filter on device %s\n",
dev->name);
- dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+ vlan_vid_add(dev, 0);
}
grp = rtnl_dereference(dev->vlgrp);
kfree_skb(skb);
return NULL;
}
+
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ ops->ndo_vlan_rx_add_vid) {
+ return ops->ndo_vlan_rx_add_vid(dev, vid);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ ops->ndo_vlan_rx_kill_vid) {
+ ops->ndo_vlan_rx_kill_vid(dev, vid);
+ }
+}
+EXPORT_SYMBOL(vlan_vid_del);
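
For context, a hedged sketch (not part of the patch) of how a hypothetical stacked driver ("foo", made up for illustration) would forward VLAN filter requests to its lower device with the new helpers, mirroring the macvlan conversion above; the NETIF_F_HW_VLAN_FILTER and ndo presence checks now live centrally in vlan_vid_add()/vlan_vid_del().

/*
 * Illustrative sketch only: "foo_priv" and the foo_* callbacks are
 * hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct foo_priv {
	struct net_device *lowerdev;	/* hypothetical lower device */
};

static int foo_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* returns 0 when the lower device has no HW VLAN filter */
	return vlan_vid_add(priv->lowerdev, vid);
}

static void foo_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct foo_priv *priv = netdev_priv(dev);

	vlan_vid_del(priv->lowerdev, vid);
}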