 static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
 {
-	struct net_device *slave_dev;
+	struct slave *slave;
 	struct net_device *bond_dev;
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
-	slave_dev = skb->dev;
-	bond_dev = ACCESS_ONCE(slave_dev->master);
+
+	slave = bond_slave_get_rcu(skb->dev);
+	bond_dev = ACCESS_ONCE(slave->dev->master);
 	if (unlikely(!bond_dev))
 		return skb;
 	if (bond_dev->priv_flags & IFF_MASTER_ARPMON)
-		slave_dev->last_rx = jiffies;
+		slave->dev->last_rx = jiffies;
-	if (bond_should_deliver_exact_match(skb, slave_dev, bond_dev)) {
+	if (bond_should_deliver_exact_match(skb, slave->dev, bond_dev)) {
 		skb->deliver_no_wcard = 1;
 		return skb;
 	}
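The receive hook above no longer walks skb->dev->master to find out whether the frame arrived on a bonding slave; the struct slave is handed back to it as the rx handler's private data. bond_slave_get_rcu() itself is not part of this excerpt; presumably it is nothing more than an RCU dereference of that private pointer, along the lines of the sketch below (only the name comes from the diff, the body shown here is an assumption):

/* Sketch: fetch the slave registered as rx_handler_data for this device.
 * rx handlers are invoked from the receive path under rcu_read_lock(),
 * so rcu_dereference() is the appropriate primitive here.
 */
#define bond_slave_get_rcu(dev) \
	((struct slave *)rcu_dereference((dev)->rx_handler_data))

The ACCESS_ONCE() read of slave->dev->master is kept because the master pointer can still be cleared concurrently; the handler bails out if the device has already been unbonded.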
pr_debug("Error %d calling netdev_set_bond_master\n", res);
goto err_restore_mac;
}
- res = netdev_rx_handler_register(slave_dev, bond_handle_frame, NULL);
+ res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+ new_slave);
if (res) {
pr_debug("Error %d calling netdev_rx_handler_register\n", res);
goto err_unset_master;
err_unreg_rxhandler:
netdev_rx_handler_unregister(slave_dev);
+ synchronize_net();
err_unset_master:
netdev_set_bond_master(slave_dev, NULL);
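In the enslave path the registration now passes new_slave as the third argument, which is exactly what bond_slave_get_rcu() reads back on the receive side. The core helper is expected to publish that pointer with RCU semantics before the handler itself becomes visible, roughly as in the sketch below (a simplification; only the function name and its call site appear in the diff, so treat the body as an assumption):

/* Sketch of the core side: store the private pointer, then the handler,
 * both with rcu_assign_pointer(), so any CPU that observes
 * bond_handle_frame() installed also observes new_slave.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}

The error labels gain a synchronize_net() between unregistering the handler and unsetting the master for the same reason the release hunks below do: new_slave is about to be freed, and no CPU may still be running bond_handle_frame() with a pointer to it.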
 	}
 	netdev_rx_handler_unregister(slave_dev);
+	synchronize_net();
 	netdev_set_bond_master(slave_dev, NULL);
 	slave_disable_netpoll(slave);
 		}
 		netdev_rx_handler_unregister(slave_dev);
+		synchronize_net();
 		netdev_set_bond_master(slave_dev, NULL);
 		slave_disable_netpoll(slave);
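These last two hunks apply the same ordering to both slave-release paths. netdev_rx_handler_unregister() only stops new packets from entering bond_handle_frame(); a CPU that looked up the handler just before the unregister may still be inside it, holding the slave pointer obtained from rx_handler_data. The added synchronize_net() waits for those in-flight readers before the code goes on to detach the master and eventually free the struct slave. At this point in the tree, synchronize_net() is assumed to be a thin wrapper around an RCU grace period:

/* Sketch, assuming synchronize_net() simply waits out one RCU grace
 * period. After it returns, every rx handler invocation that could
 * still see the old rx_handler/rx_handler_data pair has completed.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}

Without that wait, freeing the slave could race with a receive path that is still dereferencing it.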