struct sk_buff *skb;
int i;
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
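+ /* Reset the Tx ring bookkeeping so a later open starts from a clean state */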
+ np->cur_tx = 0;
+ np->dirty_tx = 0;
+ np->cur_task = 0;
+ np->last_tx = NULL;
+
netif_stop_queue(dev);
if (netif_msg_ifdown(np)) {
/* Stop the chip's Tx and Rx processes. */
iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
- /* Wait and kill tasklet */
- tasklet_kill(&np->rx_tasklet);
- tasklet_kill(&np->tx_tasklet);
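+ /* Wait up to 2000 ms for DMA activity to stop (DMACtrl bits 0xc000 clear) */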
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ break;
+ mdelay(1);
+ }
+
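+ /* Reset the ASIC (global, DMA, FIFO, network) via the upper half of ASICCtrl */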
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASICCtrl + 2);
+
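+ /* Poll up to 2000 ms for ResetBusy to clear before continuing */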
+ for (i = 2000; i > 0; i--) {
+ if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
#ifdef __i386__
if (netif_msg_hw(np)) {
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
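+ /* Clear the descriptor link so no stale Tx chain is left behind */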
+ np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(np->pci_dev,