 		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
 	netif_carrier_on(skge->netdev);
-	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
-		netif_wake_queue(skge->netdev);
+	netif_wake_queue(skge->netdev);
 	if (netif_msg_link(skge))
 		printk(KERN_INFO PFX
 	if (err)
 		goto free_rx_ring;
-	skge->tx_avail = skge->tx_ring.count - 1;
-
 	/* Initialize MAC */
 	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 	return 0;
 }
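+/* Number of free transmit ring entries.  to_clean chases to_use around
+ * the circular ring; one descriptor is always left unused so a full
+ * ring can be told apart from an empty one, hence the trailing -1.
+ * E.g. count = 512, to_use at element 10, to_clean at element 5:
+ * 512 + (5 - 10) - 1 = 506 free slots.
+ */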
+static inline int skge_avail(const struct skge_ring *ring)
+{
+	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+		+ (ring->to_clean - ring->to_use) - 1;
+}
+
 static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
 		return NETDEV_TX_LOCKED;
 	}
-	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
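+	/* each skb needs one descriptor for the linear header plus
+	 * one per page fragment */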
+	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 		       dev->name, e - ring->start, skb->len);
 	ring->to_use = e->next;
-	skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
-	if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
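+	/* stop the queue while there may not be room for another
+	 * maximally fragmented skb (MAX_SKB_FRAGS pages plus the header) */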
+	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
 	}
 		e->skb = NULL;
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
 				 skb_headlen(skb), PCI_DMA_TODEVICE);
-		++skge->tx_avail;
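+		/* the head descriptor is followed by one descriptor
+		 * per page fragment; unmap each of them */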
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			e = e->next;
 			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
 				       skb_shinfo(skb)->frags[i].size,
 				       PCI_DMA_TODEVICE);
-			++skge->tx_avail;
 		}
 		dev_kfree_skb(skb);
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
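+	/* enough descriptors reclaimed for another worst-case skb;
+	 * restart the queue */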
-	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
 		netif_wake_queue(skge->netdev);
 	spin_unlock(&skge->tx_lock);