netpoll: add generic support for bridge and bonding devices
author    WANG Cong <amwang@redhat.com>
          Thu, 6 May 2010 07:47:21 +0000 (00:47 -0700)
committer David S. Miller <davem@davemloft.net>
          Thu, 6 May 2010 07:47:21 +0000 (00:47 -0700)
This patchset adds netpoll support to bridge and bonding devices. I have
already tested it with bridge, bonding, bridge over bonding, and bonding
over bridge; it all looks fine now.

To make bridge and bonding support netpoll, we need to adjust some generic
netpoll code. This patch does the following things:

1) introduce two new priv_flags for struct net_device:
   IFF_IN_NETPOLL, which indicates that the device is currently transmitting
   on behalf of netpoll (see the transmit sketch after this list);
   IFF_DISABLE_NETPOLL, which disables netpoll support for a device at
   run-time;

2) introduce one new method for netdev_ops:
   ->ndo_netpoll_cleanup(), which is used to clean up netpoll state when a
     device is removed (see the cleanup sketch after this list).

3) introduce netpoll_poll_dev(), which takes a struct net_device * parameter;
   export netpoll_send_skb() and netpoll_poll_dev(), which will be used later;

4) stash a pointer to the owning struct netpoll in struct netpoll_info, also
   for later use by stacked devices;

5) introduce ->real_dev for struct netpoll.

6) introduce a new netdev notifier event, NETDEV_BONDING_DESLAVE, which is
   used to disable netconsole before releasing a slave, to avoid deadlocks
   (see the notifier sketch after this list).
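
To make the intended use of IFF_IN_NETPOLL, the exported netpoll_send_skb()
and the netpoll pointer stashed in struct netpoll_info concrete, here is a
minimal transmit sketch. The example_master_xmit_one() helper and its
surroundings are made up for illustration and are not part of this patch;
only the flag, the exported function and the npinfo->netpoll pointer come
from the changes below.

/*
 * Hypothetical helper in a stacked driver (e.g. bonding); not part of this
 * patch.  When the master's xmit path runs in netpoll context (marked by
 * IFF_IN_NETPOLL), the skb is handed to the slave's own netpoll via the
 * newly exported netpoll_send_skb(), instead of going through the normal
 * transmit path, which may sleep or take incompatible locks.
 */
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>

static int example_master_xmit_one(struct sk_buff *skb,
                                   struct net_device *master,
                                   struct net_device *slave)
{
#ifdef CONFIG_NETPOLL
        if (unlikely(master->priv_flags & IFF_IN_NETPOLL) &&
            slave->npinfo && slave->npinfo->netpoll) {
                /* Reuse the struct netpoll stashed in the slave's
                 * netpoll_info (point 4 above). */
                netpoll_send_skb(slave->npinfo->netpoll, skb);
                return 0;
        }
#endif
        skb->dev = slave;
        return dev_queue_xmit(skb);
}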
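
Next, a cleanup sketch of what a stacked driver's netpoll hooks might look
like. The example_* names and the (empty) slave walk are assumptions; only
->ndo_poll_controller, the new ->ndo_netpoll_cleanup and the exported
netpoll_poll_dev() come from this patch and the existing netpoll code.

#include <linux/netdevice.h>
#include <linux/netpoll.h>

#ifdef CONFIG_NET_POLL_CONTROLLER
static void example_poll_controller(struct net_device *master)
{
        /* A virtual master has no IRQs of its own to poll; one plausible
         * implementation would walk the lower devices here and poll each
         * of them, e.g. with netpoll_poll_dev(slave_dev). */
}

static void example_netpoll_cleanup(struct net_device *master)
{
        /* netpoll_cleanup() now defers to this hook instead of clearing
         * dev->npinfo itself, so a stacked driver can also tear down any
         * per-slave netpoll state it created when netpoll was attached. */
        master->npinfo = NULL;
}
#endif

static const struct net_device_ops example_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = example_poll_controller,
        .ndo_netpoll_cleanup    = example_netpoll_cleanup,
#endif
        /* ->ndo_open, ->ndo_start_xmit, etc. omitted */
};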
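
Finally, a notifier sketch of how the new NETDEV_BONDING_DESLAVE event would
be raised; the release function is hypothetical, only the event itself and
call_netdevice_notifiers() are real.

/*
 * Hypothetical call site in a bonding-like driver's slave-release path.
 * Raising NETDEV_BONDING_DESLAVE on the master *before* the slave is torn
 * down lets netconsole disable any target bound to the master first, so
 * netpoll is no longer active while the release path takes its locks.
 */
#include <linux/netdevice.h>
#include <linux/notifier.h>

static void example_release_slave(struct net_device *master,
                                  struct net_device *slave)
{
        call_netdevice_notifiers(NETDEV_BONDING_DESLAVE, master);

        /* ... the actual unenslaving of 'slave' would follow here ... */
}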

Cc: David Miller <davem@davemloft.net>
Cc: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: WANG Cong <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/netconsole.c
include/linux/if.h
include/linux/netdevice.h
include/linux/netpoll.h
include/linux/notifier.h
net/core/netpoll.c

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea35574db454a82d25ddcc9050f057c4a90..ca142c47b2e4c631a90c5b61382c99c49f784dd2 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
        struct netconsole_target *nt;
        struct net_device *dev = ptr;
 
-       if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER))
+       if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
+             event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
                goto done;
 
        spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
                                strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
                                break;
                        case NETDEV_UNREGISTER:
-                               if (!nt->enabled)
-                                       break;
                                netpoll_cleanup(&nt->np);
+                               /* Fall through */
+                       case NETDEV_GOING_DOWN:
+                       case NETDEV_BONDING_DESLAVE:
                                nt->enabled = 0;
-                               printk(KERN_INFO "netconsole: network logging stopped"
-                                       ", interface %s unregistered\n",
-                                       dev->name);
                                break;
                        }
                }
                netconsole_target_put(nt);
        }
        spin_unlock_irqrestore(&target_list_lock, flags);
+       if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
+               printk(KERN_INFO "netconsole: network logging stopped, "
+                       "interface %s %s\n",  dev->name,
+                       event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
 
 done:
        return NOTIFY_DONE;
diff --git a/include/linux/if.h b/include/linux/if.h
index 3a9f410a296b35291150d27e900fb73d27d0c3d6..be350e62a905438533b0231439d8d65fd53d7de4 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -71,6 +71,8 @@
                                         * release skb->dst
                                         */
 #define IFF_DONT_BRIDGE 0x800          /* disallow bridging this ether dev */
+#define IFF_IN_NETPOLL 0x1000          /* whether we are processing netpoll */
+#define IFF_DISABLE_NETPOLL    0x2000  /* disable netpoll at run-time */
 
 #define IF_GET_IFACE   0x0001          /* for querying only */
 #define IF_GET_PROTO   0x0002
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 98112fbddefd293fc0a1a953d2a94cb1101ff5b9..69022d47d6f2ee5848af3f2211e0c3f2007633c1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -724,6 +724,7 @@ struct net_device_ops {
                                                        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
+       void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
        int                     (*ndo_set_vf_mac)(struct net_device *dev,
                                                  int queue, u8 *mac);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index a765ea89854989318d50d2371ad71dee60566048..017e604d05f842fe899724e02a024404f7f35d83 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -14,6 +14,7 @@
 
 struct netpoll {
        struct net_device *dev;
+       struct net_device *real_dev;
        char dev_name[IFNAMSIZ];
        const char *name;
        void (*rx_hook)(struct netpoll *, int, char *, int);
@@ -36,8 +37,11 @@ struct netpoll_info {
        struct sk_buff_head txq;
 
        struct delayed_work tx_work;
+
+       struct netpoll *netpoll;
 };
 
+void netpoll_poll_dev(struct net_device *dev);
 void netpoll_poll(struct netpoll *np);
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
@@ -47,6 +51,7 @@ int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 
 
 #ifdef CONFIG_NETPOLL
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 9c5d3fad01f37d99174d372beec1aac21ff6eaa0..7c36096223340a1d674b253430e5128b71f7abaa 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -206,6 +206,7 @@ static inline int notifier_to_errno(int ret)
 #define NETDEV_POST_TYPE_CHANGE        0x000F
 #define NETDEV_POST_INIT       0x0010
 #define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_BONDING_DESLAVE  0x0012
 
 #define SYS_DOWN       0x0001  /* Notify of system down */
 #define SYS_RESTART    SYS_DOWN
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a58f59b975974ec6d0daf13bbe71aecc1d2e0bd2..94825b109551e81b1c22a09459b5e0262a97d5e4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -179,9 +179,8 @@ static void service_arp_queue(struct netpoll_info *npi)
        }
 }
 
-void netpoll_poll(struct netpoll *np)
+void netpoll_poll_dev(struct net_device *dev)
 {
-       struct net_device *dev = np->dev;
        const struct net_device_ops *ops;
 
        if (!dev || !netif_running(dev))
@@ -201,6 +200,11 @@ void netpoll_poll(struct netpoll *np)
        zap_completion_queue();
 }
 
+void netpoll_poll(struct netpoll *np)
+{
+       netpoll_poll_dev(np->dev);
+}
+
 static void refill_skbs(void)
 {
        struct sk_buff *skb;
@@ -282,7 +286,7 @@ static int netpoll_owner_active(struct net_device *dev)
        return 0;
 }
 
-static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
@@ -308,7 +312,9 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
+                                       dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
+                                       dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
@@ -756,7 +762,10 @@ int netpoll_setup(struct netpoll *np)
                atomic_inc(&npinfo->refcnt);
        }
 
-       if (!ndev->netdev_ops->ndo_poll_controller) {
+       npinfo->netpoll = np;
+
+       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+           !ndev->netdev_ops->ndo_poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
@@ -878,6 +887,7 @@ void netpoll_cleanup(struct netpoll *np)
                        }
 
                        if (atomic_dec_and_test(&npinfo->refcnt)) {
+                               const struct net_device_ops *ops;
                                skb_queue_purge(&npinfo->arp_tx);
                                skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -885,7 +895,11 @@ void netpoll_cleanup(struct netpoll *np)
                                /* clean after last, unfinished work */
                                __skb_queue_purge(&npinfo->txq);
                                kfree(npinfo);
-                               np->dev->npinfo = NULL;
+                               ops = np->dev->netdev_ops;
+                               if (ops->ndo_netpoll_cleanup)
+                                       ops->ndo_netpoll_cleanup(np->dev);
+                               else
+                                       np->dev->npinfo = NULL;
                        }
                }
 
@@ -908,6 +922,7 @@ void netpoll_set_trap(int trap)
                atomic_dec(&trapped);
 }
 
+EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_set_trap);
 EXPORT_SYMBOL(netpoll_trap);
 EXPORT_SYMBOL(netpoll_print_options);
@@ -915,4 +930,5 @@ EXPORT_SYMBOL(netpoll_parse_options);
 EXPORT_SYMBOL(netpoll_setup);
 EXPORT_SYMBOL(netpoll_cleanup);
 EXPORT_SYMBOL(netpoll_send_udp);
+EXPORT_SYMBOL(netpoll_poll_dev);
 EXPORT_SYMBOL(netpoll_poll);