@@ ... @@ static void queue_process(struct work_struct *work)
 			netif_tx_unlock(dev);
 			local_irq_restore(flags);
-			schedule_delayed_work(&npinfo->tx_work, HZ/10);
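+			/* rearm only while the netpoll is still referenced */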
+			if (atomic_read(&npinfo->refcnt))
+				schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
 		netif_tx_unlock(dev);
@@ ... @@ void netpoll_cleanup(struct netpoll *np)
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
 				skb_queue_purge(&npinfo->txq);
-				cancel_rearming_delayed_work(&npinfo->tx_work);
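+				/* tx_work stops rearming itself once refcnt
+				 * drops to zero, so a plain cancel suffices
+				 */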
+				cancel_delayed_work(&npinfo->tx_work);
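+				/* wait for a run that was already executing
+				 * when the cancel above went through
+				 */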
 				flush_scheduled_work();
+				/* clean up after the last, unfinished run of
+				 * the work, which may have requeued packets
+				 * after the purge above
+				 */
+				while (!skb_queue_empty(&npinfo->txq)) {
+					struct sk_buff *skb;
+
+					skb = __skb_dequeue(&npinfo->txq);
+					kfree_skb(skb);
+				}
 				kfree(npinfo);
 			}
 		}