}
}
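+/* true if the tid's software queue holds any pending frames */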
+static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
+{
+	return !skb_queue_empty(&tid->buf_q);
+}
+
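+/* remove and return the first frame from the tid's software queue */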
+static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
+{
+	return __skb_dequeue(&tid->buf_q);
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
memset(&ts, 0, sizeof(ts));
- while ((skb = __skb_dequeue(&tid->buf_q))) {
+ while ((skb = ath_tid_dequeue(tid))) {
fi = get_frame_info(skb);
bf = fi->bf;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((skb = __skb_dequeue(&tid->buf_q))) {
+ while ((skb = ath_tid_dequeue(tid))) {
fi = get_frame_info(skb);
bf = fi->bf;
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+ struct ath_atx_tid *tid, struct sk_buff_head **q)
{
struct ath_frame_info *fi;
struct sk_buff *skb;
u16 seqno;
while (1) {
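+ /* record which queue the frame comes from, so callers unlink it from the right queue */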
- skb = skb_peek(&tid->buf_q);
+ *q = &tid->buf_q;
+ skb = skb_peek(*q);
if (!skb)
break;
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, *q);
ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
continue;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
+ struct sk_buff_head *tid_q;
do {
- bf = ath_tx_get_tid_subframe(sc, txq, tid);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
if (!bf) {
status = ATH_AGGR_BAW_CLOSED;
break;
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
bf->bf_state.ndelim = ndelim;
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;
bf_prev = bf;
- } while (!skb_queue_empty(&tid->buf_q));
+ } while (ath_tid_has_buffered(tid));
*aggr_len = al;
int aggr_len;
do {
- if (skb_queue_empty(&tid->buf_q))
+ if (!ath_tid_has_buffered(tid))
return;
INIT_LIST_HEAD(&bf_q);
ath_txq_lock(sc, txq);
- buffered = !skb_queue_empty(&tid->buf_q);
+ buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
- if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
+ if (!tid->paused && ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
tid->paused = false;
- if (!skb_queue_empty(&tid->buf_q)) {
+ if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf;
+ struct sk_buff_head *tid_q;
int sent = 0;
int i;
continue;
ath_txq_lock(sc, tid->ac->txq);
- while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
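+ /* stop once the tid has no more buffered frames; ath_tx_get_tid_subframe() then returns NULL */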
+ while (nframes > 0) {
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
if (!bf)
break;
- __skb_unlink(bf->bf_mpdu, &tid->buf_q);
+ __skb_unlink(bf->bf_mpdu, tid_q);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
- if (skb_queue_empty(&tid->buf_q))
+ if (!ath_tid_has_buffered(tid))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->ac->txq);
* add tid to round-robin queue if more frames
* are pending for the tid
*/
- if (!skb_queue_empty(&tid->buf_q))
+ if (ath_tid_has_buffered(tid))
ath_tx_queue_tid(txq, tid);
if (tid == last_tid ||
* - seqno is not within block-ack window
* - h/w queue depth exceeds low water mark
*/
- if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
+ if ((ath_tid_has_buffered(tid) || tid->paused ||
!BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
txq != sc->tx.uapsdq) {