spin_lock_bh(&txq->axq_lock);
- if (txq->axq_depth || !list_empty(&txq->axq_acq))
+ if (txq->axq_depth)
pending = true;
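+	/* also check the current channel context's per-AC lists for queued traffic */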
+ if (txq->mac80211_qnum >= 0) {
+ struct list_head *list;
+
+ list = &sc->cur_chan->acq[txq->mac80211_qnum];
+ if (!list_empty(list))
+ pending = true;
+ }
spin_unlock_bh(&txq->axq_lock);
return pending;
}
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
- int i;
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
}
work:
ath_restart_work(sc);
-
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
-
- spin_lock_bh(&sc->tx.txq[i].axq_lock);
- ath_txq_schedule(sc, &sc->tx.txq[i]);
- spin_unlock_bh(&sc->tx.txq[i].axq_lock);
- }
+ ath_txq_schedule_all(sc);
}
sc->gtt_cnt = 0;
avp->vif = vif;
+ /* XXX - will be removed once chanctx ops are added */
+ avp->chanctx = sc->cur_chan;
+ list_add_tail(&avp->list, &sc->cur_chan->vifs);
+
an->sc = sc;
an->sta = NULL;
an->vif = vif;
}
spin_unlock_bh(&sc->sc_pcu_lock);
+ list_del(&avp->list);
sc->nvifs--;
sc->tx99_vif = NULL;
ieee80211_tx_status(sc->hw, skb);
}
-static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
+ struct list_head *list;
+ struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+ struct ath_chanctx *ctx = avp->chanctx;
+
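+	/* nothing to queue if the vif has not been assigned a channel context */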
+ if (!ctx)
+ return;
if (tid->sched)
return;
	if (ac->sched)
		return;
ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+
+ list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+ list_add_tail(&ac->list, list);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
skb_queue_splice_tail(&bf_pending, &tid->retry_q);
if (!an->sleeping) {
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->ac->clear_ps_filter = true;
ac->clear_ps_filter = true;
if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
}
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
}
txq->axq_link = NULL;
__skb_queue_head_init(&txq->complete_q);
INIT_LIST_HEAD(&txq->axq_q);
- INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock);
txq->axq_depth = 0;
txq->axq_ampdu_depth = 0;
sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
-/* For each axq_acq entry, for each tid, try to schedule packets
+/* For each acq entry, for each tid, try to schedule packets
* for transmit until ampdu_depth has reached min Q depth.
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
+ struct list_head *ac_list;
bool sent = false;
+ if (txq->mac80211_qnum < 0)
+ return;
+
+ ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+
if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
- list_empty(&txq->axq_acq))
+ list_empty(ac_list))
return;
rcu_read_lock();
- last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
- while (!list_empty(&txq->axq_acq)) {
+ last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+ while (!list_empty(ac_list)) {
bool stop = false;
- ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+ ac = list_first_entry(ac_list, struct ath_atx_ac, list);
last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
list_del(&ac->list);
ac->sched = false;
* are pending for the tid
*/
if (ath_tid_has_buffered(tid))
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
if (stop || tid == last_tid)
break;
if (!list_empty(&ac->tid_q) && !ac->sched) {
ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+ list_add_tail(&ac->list, ac_list);
}
if (stop)
break;
sent = false;
- last_ac = list_entry(txq->axq_acq.prev,
+ last_ac = list_entry(ac_list->prev,
struct ath_atx_ac, list);
}
}
rcu_read_unlock();
}
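+/* Schedule pending frames on all hardware queues mapped to mac80211 ACs */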
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+ struct ath_txq *txq;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ txq = sc->tx.txq_map[i];
+
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+}
+
/***********/
/* TX, DMA */
/***********/
TX_STAT_INC(txq->axq_qnum, a_queued_sw);
__skb_queue_tail(&tid->buf_q, skb);
if (!txctl->an->sleeping)
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
goto out;