netdevice: Consolidate to use existing macros where available.
author: Krishna Kumar <krkumar2@in.ibm.com>
Sat, 29 Aug 2009 20:21:21 +0000 (20:21 +0000)
committer: David S. Miller <davem@davemloft.net>
Mon, 31 Aug 2009 05:16:20 +0000 (22:16 -0700)
Patch compiled cleanly, and testing with 32 simultaneous netperf sessions ran fine.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h

index 9192cdf5bd2cf39f3180df1e584343630257b0be..60d3aac49ed485ca9ac2657d11ccc87500cbd6c5 100644 (file)
@@ -1257,7 +1257,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
-               clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+               netif_tx_start_queue(dev_queue);
                return;
        }
 #endif
@@ -1363,7 +1363,8 @@ static inline int netif_running(const struct net_device *dev)
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-       clear_bit(__QUEUE_STATE_XOFF, &txq->state);
+
+       netif_tx_start_queue(txq);
 }
 
 /**
@@ -1380,7 +1381,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
        if (netpoll_trap())
                return;
 #endif
-       set_bit(__QUEUE_STATE_XOFF, &txq->state);
+       netif_tx_stop_queue(txq);
 }
 
 /**
@@ -1394,7 +1395,8 @@ static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                         u16 queue_index)
 {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-       return test_bit(__QUEUE_STATE_XOFF, &txq->state);
+
+       return netif_tx_queue_stopped(txq);
 }
 
 static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1746,8 +1748,7 @@ static inline void netif_tx_unlock(struct net_device *dev)
                 * force a schedule.
                 */
                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
-               if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
-                       __netif_schedule(txq->qdisc);
+               netif_schedule_queue(txq);
        }
        spin_unlock(&dev->tx_global_lock);
 }