 	int fw_mutex_depth;
 	struct completion *hostcmd_wait;
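+	/* count of BA watchdog events reported by the firmware that
+	 * the watchdog_ba_handle worker has not yet handled
+	 */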
+	atomic_t watchdog_event_pending;
+
 	/* lock held over TX and TX reap */
 	spinlock_t tx_lock;
 		return -EBUSY;
 	}
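+	/* Don't wait for the TX queues to drain if a BA watchdog
+	 * event is still waiting to be handled.
+	 */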
+	if (atomic_read(&priv->watchdog_event_pending))
+		return 0;
+
 	/*
 	 * The TX queues are stopped at this point, so this test
 	 * doesn't need to take ->tx_lock.
 		spin_unlock_bh(&priv->tx_lock);
 		timeout = wait_for_completion_timeout(&tx_wait,
			    msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS));
+
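+		/* A BA watchdog event arrived while we were waiting;
+		 * clear ->tx_wait and give up the wait.
+		 */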
+		if (atomic_read(&priv->watchdog_event_pending)) {
+			spin_lock_bh(&priv->tx_lock);
+			priv->tx_wait = NULL;
+			spin_unlock_bh(&priv->tx_lock);
+			return 0;
+		}
+
 		spin_lock_bh(&priv->tx_lock);
 		if (timeout) {
 	spin_unlock(&priv->stream_lock);
 done:
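+	/* Processing of this BA watchdog event is finished; drop it
+	 * from the pending count.
+	 */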
+	atomic_dec(&priv->watchdog_event_pending);
 	status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
 	iowrite32((status | MWL8K_A2H_INT_BA_WATCHDOG),
		  priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
 	}
 	if (status & MWL8K_A2H_INT_BA_WATCHDOG) {
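+		/* Mask further BA watchdog interrupts until the queued
+		 * work has dealt with this event, and record that one
+		 * more event is outstanding.
+		 */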
+		iowrite32(~MWL8K_A2H_INT_BA_WATCHDOG,
+			  priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
+
+		atomic_inc(&priv->watchdog_event_pending);
 		status &= ~MWL8K_A2H_INT_BA_WATCHDOG;
 		ieee80211_queue_work(hw, &priv->watchdog_ba_handle);
 	}
 	priv->sniffer_enabled = false;
 	priv->wmm_enabled = false;
 	priv->pending_tx_pkts = 0;
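+	/* No BA watchdog events are outstanding yet. */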
+	atomic_set(&priv->watchdog_event_pending, 0);
 	rc = mwl8k_rxq_init(hw, 0);
 	if (rc)