static inline void qdisc_run(struct netdev_queue *txq)
{
+ struct Qdisc *q = txq->qdisc;
+
if (!netif_tx_queue_stopped(txq) &&
- !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
+ !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
__qdisc_run(txq);
}
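The hunk above is the heart of the change: whichever CPU wins the test_and_set_bit() owns the right to drain this qdisc, and everyone else returns at once, trusting the owner to transmit whatever they just enqueued. Only the home of the bit moves, from txq->state to q->state. A minimal userspace sketch of that guard, not kernel code, with C11 atomics standing in for the kernel's bitops and all toy_* names invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct toy_qdisc {
	atomic_flag running;	/* stand-in for __QDISC_STATE_RUNNING */
	int backlog;		/* packets waiting; illustration only */
};

/* __qdisc_run() analogue: drain everything that is queued. */
static void toy_drain(struct toy_qdisc *q)
{
	while (q->backlog > 0) {
		q->backlog--;
		printf("transmitted one packet\n");
	}
	atomic_flag_clear(&q->running);	/* the clear_bit() at the end */
}

/* qdisc_run() analogue: only the winner of the flag drains. */
static void toy_run(struct toy_qdisc *q)
{
	if (!atomic_flag_test_and_set(&q->running))
		toy_drain(q);
}

int main(void)
{
	struct toy_qdisc q = { .running = ATOMIC_FLAG_INIT, .backlog = 3 };

	toy_run(&q);	/* wins the flag, drains all three packets */
	toy_run(&q);	/* wins again; nothing left to send */
	return 0;
}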
/*
* NOTE: Called under queue->lock with locally disabled BH.
*
- * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
- * this queue at a time. queue->lock serializes queue accesses for
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. queue->lock serializes queue accesses for
* this queue AND txq->qdisc pointer itself.
*
 * netif_tx_lock serializes accesses to device driver.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
*/
-static inline int qdisc_restart(struct netdev_queue *txq)
+static inline int qdisc_restart(struct netdev_queue *txq,
+ struct Qdisc *q)
{
- struct Qdisc *q = txq->qdisc;
int ret = NETDEV_TX_BUSY;
struct net_device *dev;
struct sk_buff *skb;
HARD_TX_UNLOCK(dev, txq);
spin_lock(&txq->lock);
- q = txq->qdisc;
switch (ret) {
case NETDEV_TX_OK:
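The hunk above (its middle elided) covers the stretch of qdisc_restart() that hands a packet to the driver under HARD_TX_LOCK and then retakes queue->lock. With q now passed in by the caller, the old re-read of txq->qdisc after relocking (the dropped line) is unnecessary. A rough pthread sketch of that lock choreography, with invented toy_* names and none of the kernel's return-code handling:

#include <pthread.h>
#include <stdbool.h>

struct toy_queue {
	pthread_mutex_t queue_lock;	/* plays the role of txq->lock */
	pthread_mutex_t tx_lock;	/* plays the role of netif_tx_lock */
	int backlog;
};

/* qdisc_restart() analogue: returns >0 while the queue is not empty. */
static int toy_restart(struct toy_queue *txq)
{
	bool have_pkt = false;
	int remaining;

	/* Dequeue under the queue lock... */
	pthread_mutex_lock(&txq->queue_lock);
	if (txq->backlog > 0) {
		txq->backlog--;
		have_pkt = true;
	}
	pthread_mutex_unlock(&txq->queue_lock);

	/* ...transmit under the driver lock; the two are never nested... */
	if (have_pkt) {
		pthread_mutex_lock(&txq->tx_lock);
		/* dev_hard_start_xmit() would run here */
		pthread_mutex_unlock(&txq->tx_lock);
	}

	/* ...then retake the queue lock to report what is left. */
	pthread_mutex_lock(&txq->queue_lock);
	remaining = txq->backlog;
	pthread_mutex_unlock(&txq->queue_lock);
	return remaining;
}

int main(void)
{
	struct toy_queue txq = {
		.queue_lock = PTHREAD_MUTEX_INITIALIZER,
		.tx_lock = PTHREAD_MUTEX_INITIALIZER,
		.backlog = 2,
	};

	while (toy_restart(&txq))
		;	/* drain, as __qdisc_run() does */
	return 0;
}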
void __qdisc_run(struct netdev_queue *txq)
{
unsigned long start_time = jiffies;
+ struct Qdisc *q = txq->qdisc;
- while (qdisc_restart(txq)) {
+ while (qdisc_restart(txq, q)) {
if (netif_tx_queue_stopped(txq))
break;
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			netif_schedule_queue(txq);
			break;
		}
	}
- clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
+ clear_bit(__QDISC_STATE_RUNNING, &q->state);
}
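__qdisc_run() still keys its stop and postpone checks off the queue, but the RUNNING bit it clears on exit now belongs to the qdisc. The sketch below compresses the need_resched()/jiffies check into a coarse time() comparison; toy_* names are invented, and the real kernel reschedules the queue rather than merely breaking out:

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct toy_qdisc {
	atomic_flag running;	/* stand-in for __QDISC_STATE_RUNNING */
	int backlog;
};

struct toy_queue {
	struct toy_qdisc *qdisc;	/* txq->qdisc analogue */
	bool stopped;			/* netif_tx_queue_stopped() analogue */
};

/* Pretend to transmit one packet; >0 means the queue is not empty. */
static int toy_restart(struct toy_queue *txq, struct toy_qdisc *q)
{
	if (q->backlog > 0)
		q->backlog--;
	return q->backlog;
}

static void toy_qdisc_run_inner(struct toy_queue *txq)
{
	time_t start_time = time(NULL);		/* coarse jiffies analogue */
	struct toy_qdisc *q = txq->qdisc;	/* mirrors the new local */

	while (toy_restart(txq, q)) {
		if (txq->stopped)
			break;
		/* Give up the CPU if we have been at it too long. */
		if (time(NULL) != start_time)
			break;	/* the kernel reschedules the queue here */
	}
	/* The bit is cleared on the qdisc, not on txq, as in the patch. */
	atomic_flag_clear(&q->running);
}

int main(void)
{
	struct toy_qdisc q = { .running = ATOMIC_FLAG_INIT, .backlog = 3 };
	struct toy_queue txq = { .qdisc = &q, .stopped = false };

	/* The caller (toy qdisc_run) is assumed to have won the flag. */
	(void)atomic_flag_test_and_set(&q.running);
	toy_qdisc_run_inner(&txq);
	return 0;
}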
static bool some_qdisc_is_running(struct net_device *dev, int lock)
{
	unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
+ struct Qdisc *q;
int val;
dev_queue = netdev_get_tx_queue(dev, i);
+ q = dev_queue->qdisc;
if (lock)
spin_lock_bh(&dev_queue->lock);
- val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);
+ val = test_bit(__QDISC_STATE_RUNNING, &q->state);
if (lock)
spin_unlock_bh(&dev_queue->lock);
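The per-queue poll above now reads the RUNNING bit from each queue's qdisc instead of from the queue itself; a helper of this shape is what device teardown polls while waiting for every qdisc to go quiet. A self-contained sketch of the walk, with invented toy_* names and C11 atomics in place of test_bit():

#include <stdatomic.h>
#include <stdbool.h>

#define TOY_NUM_TX_QUEUES 4

struct toy_qdisc {
	atomic_bool running;		/* __QDISC_STATE_RUNNING analogue */
};

struct toy_queue {
	struct toy_qdisc *qdisc;	/* txq->qdisc analogue */
};

/* some_qdisc_is_running() analogue: is any qdisc still mid-run? */
static bool some_toy_qdisc_is_running(struct toy_queue *queues, int n)
{
	for (int i = 0; i < n; i++) {
		/* The bit is read from the qdisc, not from the queue. */
		if (atomic_load(&queues[i].qdisc->running))
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_qdisc qdiscs[TOY_NUM_TX_QUEUES] = { 0 };
	struct toy_queue queues[TOY_NUM_TX_QUEUES];

	for (int i = 0; i < TOY_NUM_TX_QUEUES; i++)
		queues[i].qdisc = &qdiscs[i];

	/* Teardown-style wait: spin until every qdisc has finished. */
	while (some_toy_qdisc_is_running(queues, TOY_NUM_TX_QUEUES))
		;	/* the kernel yields here instead of busy-spinning */
	return 0;
}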