diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
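+ /* Protects the cached interrupt status word (intrstatus) that is
+  * shared between the IRQ handler and the interrupt tasklet. */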
+ spinlock_t intr_lock;
struct tasklet_struct intr_tq;
struct tasklet_struct bcon_tasklet;
struct ath_hw *sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
+ spin_lock_init(&sc->intr_lock);
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
spin_lock_init(&sc->chan_lock);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
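+/*
+ * Unconditionally program the interrupt enable registers; the
+ * ATH9K_INT_GLOBAL and reference-count checks are left to the
+ * callers below.
+ */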
-void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
u32 async_mask;
- if (!(ah->imask & ATH9K_INT_GLOBAL))
- return;
-
- if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
- ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
- atomic_read(&ah->intr_ref_cnt));
- return;
- }
-
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
AR_SREV_9561(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
+
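+/*
+ * Restore IER after the tasklet has drained the cached status. The
+ * reference count is left untouched: interrupts are only re-enabled
+ * when no disable references are outstanding.
+ */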
+void ath9k_hw_resume_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ return;
+
+ if (atomic_read(&ah->intr_ref_cnt) != 0) {
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+ atomic_read(&ah->intr_ref_cnt));
+ return;
+ }
+
+ __ath9k_hw_enable_interrupts(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
+
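+/*
+ * Drop one disable reference and re-enable interrupts once the
+ * count returns to zero.
+ */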
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ return;
+
+ if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+ atomic_read(&ah->intr_ref_cnt));
+ return;
+ }
+
+ __ath9k_hw_enable_interrupts(ah);
+}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
void ath9k_hw_set_interrupts(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
void ath9k_hw_enable_interrupts(struct ath_hw *ah);
void ath9k_hw_disable_interrupts(struct ath_hw *ah);
void ath9k_hw_kill_interrupts(struct ath_hw *ah);
+void ath9k_hw_resume_interrupts(struct ath_hw *ah);
void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
struct ath_common *common = ath9k_hw_common(ah);
enum ath_reset_type type;
unsigned long flags;
- u32 status = sc->intrstatus;
+ u32 status;
u32 rxmask;
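+ /* Fetch and clear the cached status atomically, so that bits set
+  * by a concurrent IRQ handler invocation cannot be lost. */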
+ spin_lock_irqsave(&sc->intr_lock, flags);
+ status = sc->intrstatus;
+ sc->intrstatus = 0;
+ spin_unlock_irqrestore(&sc->intr_lock, flags);
+
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
if (status & ATH9K_INT_FATAL) {
type = RESET_TYPE_FATAL_INT;
ath9k_queue_reset(sc, type);
-
- /*
- * Increment the ref. counter here so that
- * interrupts are enabled in the reset routine.
- */
- atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
goto out;
}
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
- /*
- * Increment the ref. counter here so that
- * interrupts are enabled in the reset routine.
- */
- atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET,
"BB_WATCHDOG: Skipping interrupts\n");
goto out;
if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
type = RESET_TYPE_TX_GTT;
ath9k_queue_reset(sc, type);
- atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET,
"GTT: Skipping interrupts\n");
goto out;
ath9k_btcoex_handle_interrupt(sc, status);
/* re-enable hardware interrupt */
- ath9k_hw_enable_interrupts(ah);
+ ath9k_hw_resume_interrupts(ah);
out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
return IRQ_NONE;
/* Cache the status */
- sc->intrstatus = status;
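+ /* Accumulate rather than overwrite: the tasklet may not yet have
+  * consumed the bits cached by a previous interrupt. */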
+ spin_lock(&sc->intr_lock);
+ sc->intrstatus |= status;
+ spin_unlock(&sc->intr_lock);
if (status & SCHED_INTR)
sched = true;
if (sched) {
/* turn off every interrupt */
- ath9k_hw_disable_interrupts(ah);
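+ /* Mask the hardware interrupt without touching the disable
+  * reference count; the tasklet re-enables interrupts via
+  * ath9k_hw_resume_interrupts() once the cached status is processed. */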
+ ath9k_hw_kill_interrupts(ah);
tasklet_schedule(&sc->intr_tq);
}