static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
-		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
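
The removed line pins down exactly what the new wrapper must do. netif_tx_start_queue() is not shown in this fragment, but a minimal sketch consistent with the line it replaces, assuming it is a thin inline helper alongside these functions, would be:

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	/* Clear the XOFF bit so the stack may hand packets to this queue. */
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

Funneling every XOFF transition through one helper, instead of open-coding the bit operation at each call site, leaves a single place to change the queue-state encoding later. The subqueue variants below go through the same helper.
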
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
+
+	netif_tx_start_queue(txq);
}
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
-	set_bit(__QUEUE_STATE_XOFF, &txq->state);
+	netif_tx_stop_queue(txq);
}
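
Again the removed set_bit() line tells you what the counterpart helper wraps. A sketch of netif_tx_stop_queue(), assuming it simply mirrors the start helper:

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	/* Set the XOFF bit so the stack stops handing packets to this queue. */
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
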
/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
+
+	return netif_tx_queue_stopped(txq);
}
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
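
Both subqueue tests now resolve to netif_tx_queue_stopped(), whose definition is likewise not part of this fragment. A minimal sketch matching the removed test_bit() line (the const qualifier is an assumption, since the helper only reads queue state):

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	/* Non-zero when the queue is flow-controlled off (XOFF set). */
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
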
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
-		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
-			__netif_schedule(txq->qdisc);
+		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}
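
The two removed lines are presumably the helper's body verbatim; a sketch of netif_schedule_queue() consistent with them:

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	/* Kick this queue's qdisc unless flow control has the queue stopped. */
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

With the XOFF test folded into the helper, netif_tx_unlock() no longer peeks at queue state bits directly, so the whole unlock path reads in terms of queue operations rather than bit manipulation.
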