ath9k: use the new API for setting tx descriptors
drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
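/*
 * Worked example (comment added, not in the original source): with the
 * full 800 ns guard interval an HT OFDM symbol lasts 4 us, so
 * SYMBOL_TIME(10) = 40 us and NUM_SYMBOLS_PER_USEC(8) = 2. With the
 * short 400 ns GI a symbol lasts 3.6 us; the half-GI macros scale by
 * 18/5 = 3.6 in integer math, e.g. SYMBOL_TIME_HALFGI(10) =
 * (10 * 18 + 4) / 5 = 36 us.
 */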

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /* 0: BPSK */
	{    52,  108 },     /* 1: QPSK 1/2 */
	{    78,  162 },     /* 2: QPSK 3/4 */
	{   104,  216 },     /* 3: 16-QAM 1/2 */
	{   156,  324 },     /* 4: 16-QAM 3/4 */
	{   208,  432 },     /* 5: 64-QAM 2/3 */
	{   234,  486 },     /* 6: 64-QAM 3/4 */
	{   260,  540 },     /* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
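/*
 * Example (comment added): the table holds single-stream bits per OFDM
 * symbol, indexed by (MCS % 8) and channel width; callers scale by the
 * stream count. For MCS 12 (16-QAM 3/4 over two streams) on 40 MHz,
 * bits_per_symbol[12 % 8][1] * HT_RC_2_STREAMS(12) = 324 * 2 = 648 bits
 * per 4 us symbol, i.e. the familiar 162 Mbps long-GI rate.
 */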

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
	       12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
	       10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
	       14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
	       13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
	       20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
	       26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
	       14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
	       22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
	       29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
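/*
 * Note (comment added): each entry is the largest frame length, capped
 * at 65532 bytes, that fits in a 4 ms transmit window at that MCS.
 * Sanity check for MCS 0, HT20, long GI: 26 bits per 4 us symbol is
 * 6.5 Mbps, and 4 ms at that rate is about 3250 bytes; less PHY
 * overhead, this lines up with the 3212 in the table.
 */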

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
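/*
 * Example (comment added): with seq_start = 100 and an incoming seqno
 * of 103, ATH_BA_INDEX yields 3, so bit (baw_head + 3) modulo
 * ATH_TID_MAX_BUFS is set. ath_tx_update_baw() clears that bit on
 * completion and then slides seq_start/baw_head forward past any
 * leading completed slots, keeping this software window in step with
 * the receiver's block-ack window.
 */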

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
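/*
 * Note (comment added): on non-EDMA hardware the final descriptor of an
 * aggregate may be a "holding" descriptor still owned by the DMA engine
 * (bf_stale); ath_clone_txbuf() above duplicates it so the subframe can
 * be requeued for retry while the original stays with the hardware.
 */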

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
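/*
 * Example (comment added): if the lowest rate in the series is MCS 3 on
 * HT20 (ath_max_4ms_framelen[MCS_HT20][3] = 12864) and BT coex priority
 * is detected, the limit becomes min(12864 * 3 / 8, ATH_AMPDU_LIMIT_MAX)
 * = 4824 bytes; any smaller ADDBA-advertised maxampdu would shrink it
 * further.
 */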

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
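/*
 * Worked example (comment added, assuming ATH_AGGR_DELIM_SZ == 4): for
 * an MPDU density of 8 us at MCS 7 / HT40 / full GI with one stream,
 * nsymbols = 8 >> 2 = 2 and nsymbits = 540, so minlen = 2 * 540 / 8 =
 * 135 bytes. An 80-byte subframe then needs (135 - 80) / 4 = 13 extra
 * 4-byte delimiters to stretch it to the required air time.
 */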

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
				!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
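/*
 * Worked example (comment added): a 1200-byte MPDU at MCS 7 / HT20 /
 * full GI gives nbits = 1200 * 8 + 22 = 9622 and nsymbits = 260, so
 * nsymbols = ceil(9622 / 260) = 38 symbols = 152 us, plus 36 us of
 * training/signal fields (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF +
 * HT_LTF(1)) for 188 us total.
 */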

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}
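/*
 * Note (comment added): this function is the point of the "new API"
 * named in the commit title. Instead of poking individual descriptor
 * fields, the driver fills one generic struct ath_tx_info per buffer
 * and hands it to ath9k_hw_set_txdesc(), which presumably writes the
 * chip-specific (AR9002 vs. AR9003/EDMA) descriptor layout in one
 * place.
 */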

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
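/*
 * Note (comment added): on EDMA (AR9003-family) hardware, frames are
 * pushed through the txq_fifo ring (ATH_TXFIFO_DEPTH entries) and
 * handed to the MAC via ath9k_hw_puttxbuf(); legacy chips instead chain
 * descriptors with ath9k_hw_set_desc_link() and only need puttxbuf plus
 * ath9k_hw_txstart() when the queue was previously empty.
 */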

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
1691
82b873af 1692static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
44f1d26c 1693 struct ath_atx_tid *tid, struct sk_buff *skb)
e8324357 1694{
44f1d26c
FF
1695 struct ath_frame_info *fi = get_frame_info(skb);
1696 struct list_head bf_head;
e8324357
S
1697 struct ath_buf *bf;
1698
44f1d26c
FF
1699 bf = fi->bf;
1700 if (!bf)
1701 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1702
1703 if (!bf)
1704 return;
1705
1706 INIT_LIST_HEAD(&bf_head);
1707 list_add_tail(&bf->list, &bf_head);
399c6489 1708 bf->bf_state.bf_type = 0;
e8324357
S
1709
1710 /* update starting sequence number for subsequent ADDBA request */
82b873af
FF
1711 if (tid)
1712 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
e8324357 1713
d43f3015 1714 bf->bf_lastbf = bf;
493cf04f 1715 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
44f1d26c 1716 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
fec247c0 1717 TX_STAT_INC(txq->axq_qnum, queued);
e8324357
S
1718}
1719
2d42efc4
FF
1720static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1721 int framelen)
e8324357
S
1722{
1723 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1724 struct ieee80211_sta *sta = tx_info->control.sta;
1725 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
6a0ddaef 1726 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2d42efc4 1727 struct ath_frame_info *fi = get_frame_info(skb);
93ae2dd2 1728 struct ath_node *an = NULL;
2d42efc4 1729 enum ath9k_key_type keytype;
e8324357 1730
2d42efc4 1731 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1732
93ae2dd2
FF
1733 if (sta)
1734 an = (struct ath_node *) sta->drv_priv;
1735
2d42efc4
FF
1736 memset(fi, 0, sizeof(*fi));
1737 if (hw_key)
1738 fi->keyix = hw_key->hw_key_idx;
93ae2dd2
FF
1739 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1740 fi->keyix = an->ps_key;
2d42efc4
FF
1741 else
1742 fi->keyix = ATH9K_TXKEYIX_INVALID;
1743 fi->keytype = keytype;
1744 fi->framelen = framelen;
e8324357
S
1745}
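/*
 * Key selection order above (descriptive note): an explicit hw_key
 * wins; otherwise a station's ps_key (if set) is used for data
 * frames; otherwise keyix is marked ATH9K_TXKEYIX_INVALID and no
 * hw key slot is referenced.
 */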
1746
ea066d5a
MSS
1747u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1748{
1749 struct ath_hw *ah = sc->sc_ah;
1750 struct ath9k_channel *curchan = ah->curchan;
d77bf3eb
RM
1751 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1752 (curchan->channelFlags & CHANNEL_5GHZ) &&
1753 (chainmask == 0x7) && (rate < 0x90))
ea066d5a
MSS
1754 return 0x3;
1755 else
1756 return chainmask;
1757}
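/*
 * Example (a sketch): with APM enabled on a 5 GHz channel, a legacy
 * rate (rate < 0x90) asked to use all three chains (chainmask 0x7)
 * is reduced to two chains (0x3); every other combination keeps the
 * caller's chainmask.
 */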
1758
44f1d26c
FF
1759/*
 1760 * Assign a descriptor (and a sequence number, if necessary)
 1761 * and map the buffer for DMA. Frees the skb on error.
1762 */
fa05f87a 1763static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
04caf863 1764 struct ath_txq *txq,
fa05f87a 1765 struct ath_atx_tid *tid,
2d42efc4 1766 struct sk_buff *skb)
f078f209 1767{
82b873af 1768 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1769 struct ath_frame_info *fi = get_frame_info(skb);
fa05f87a 1770 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
82b873af 1771 struct ath_buf *bf;
fa05f87a 1772 u16 seqno;
82b873af
FF
1773
1774 bf = ath_tx_get_buffer(sc);
1775 if (!bf) {
226afe68 1776 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
44f1d26c 1777 goto error;
82b873af 1778 }
e022edbd 1779
528f0c6b 1780 ATH_TXBUF_RESET(bf);
f078f209 1781
fa05f87a
FF
1782 if (tid) {
1783 seqno = tid->seq_next;
1784 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1785 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1786 bf->bf_state.seqno = seqno;
1787 }
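	/*
	 * Worked example (a sketch): with tid->seq_next == 5, seq_ctrl
	 * becomes 5 << IEEE80211_SEQ_SEQ_SHIFT (fragment number 0) and
	 * seq_next advances to 6, wrapping modulo IEEE80211_SEQ_MAX
	 * (4096).
	 */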
1788
f078f209 1789 bf->bf_mpdu = skb;
f8316df1 1790
c1739eb3
BG
1791 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1792 skb->len, DMA_TO_DEVICE);
1793 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1794 bf->bf_mpdu = NULL;
6cf9e995 1795 bf->bf_buf_addr = 0;
3800276a
JP
1796 ath_err(ath9k_hw_common(sc->sc_ah),
1797 "dma_mapping_error() on TX\n");
82b873af 1798 ath_tx_return_buffer(sc, bf);
44f1d26c 1799 goto error;
f8316df1
LR
1800 }
1801
56dc6336 1802 fi->bf = bf;
04caf863
FF
1803
1804 return bf;
44f1d26c
FF
1805
1806error:
1807 dev_kfree_skb_any(skb);
1808 return NULL;
04caf863
FF
1809}
1810
1811/* FIXME: tx power */
44f1d26c 1812static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
04caf863
FF
1813 struct ath_tx_control *txctl)
1814{
04caf863
FF
1815 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1816 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
248a38d0 1817 struct ath_atx_tid *tid = NULL;
fa05f87a 1818 struct ath_buf *bf;
04caf863 1819 u8 tidno;
f078f209 1820
528f0c6b 1821 spin_lock_bh(&txctl->txq->axq_lock);
61e1b0b0
MSS
1822 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1823 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1824 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1825 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1826 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1827
066dae93 1828 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1829 }
1830
1831 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1832 /*
1833 * Try aggregation if it's a unicast data frame
1834 * and the destination is HT capable.
1835 */
44f1d26c 1836 ath_tx_send_ampdu(sc, tid, skb, txctl);
f078f209 1837 } else {
44f1d26c
FF
1838 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1839 if (!bf)
1840 goto out;
04caf863 1841
82b873af
FF
1842 bf->bf_state.bfs_paprd = txctl->paprd;
1843
9cf04dcc
MSS
1844 if (txctl->paprd)
1845 bf->bf_state.bfs_paprd_timestamp = jiffies;
1846
44f1d26c 1847 ath_tx_send_normal(sc, txctl->txq, tid, skb);
f078f209 1848 }
528f0c6b 1849
fa05f87a 1850out:
528f0c6b 1851 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1852}
1853
f8316df1 1854/* Upon failure caller should free skb */
c52f33d0 1855int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1856 struct ath_tx_control *txctl)
f078f209 1857{
28d16708
FF
1858 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1859 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1860 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1861 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1862 struct ath_softc *sc = hw->priv;
84642d6b 1863 struct ath_txq *txq = txctl->txq;
4d91f9f3 1864 int padpos, padsize;
04caf863 1865 int frmlen = skb->len + FCS_LEN;
28d16708 1866 int q;
f078f209 1867
a9927ba3
BG
1868 /* NOTE: sta can be NULL according to net/mac80211.h */
1869 if (sta)
1870 txctl->an = (struct ath_node *)sta->drv_priv;
1871
04caf863
FF
1872 if (info->control.hw_key)
1873 frmlen += info->control.hw_key->icv_len;
1874
f078f209 1875 /*
e8324357
S
1876 * As a temporary workaround, assign seq# here; this will likely need
1877 * to be cleaned up to work better with Beacon transmission and virtual
1878 * BSSes.
f078f209 1879 */
e8324357 1880 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1881 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1882 sc->tx.seq_no += 0x10;
1883 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1884 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1885 }
f078f209 1886
e8324357 1887 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1888 padpos = ath9k_cmn_padpos(hdr->frame_control);
1889 padsize = padpos & 3;
28d16708
FF
1890 if (padsize && skb->len > padpos) {
1891 if (skb_headroom(skb) < padsize)
1892 return -ENOMEM;
1893
e8324357 1894 skb_push(skb, padsize);
4d91f9f3 1895 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1896 }
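	/*
	 * Worked example (a sketch): ath9k_cmn_padpos() returns the
	 * 802.11 header length, e.g. 26 for a QoS data frame, so
	 * padsize = 26 & 3 = 2 and two pad bytes are inserted after the
	 * header; a 24-byte non-QoS header yields padsize 0 and the
	 * frame is left as is.
	 */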
f078f209 1897
f59a59fe
FF
1898 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1899 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1900 !ieee80211_is_data(hdr->frame_control))
1901 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1902
2d42efc4
FF
1903 setup_frame_info(hw, skb, frmlen);
1904
1905 /*
1906 * At this point, the vif, hw_key and sta pointers in the tx control
 1907 * info are no longer valid (overwritten by the ath_frame_info data).
1908 */
1909
28d16708
FF
1910 q = skb_get_queue_mapping(skb);
1911 spin_lock_bh(&txq->axq_lock);
1912 if (txq == sc->tx.txq_map[q] &&
1913 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1914 ieee80211_stop_queue(sc->hw, q);
28d16708 1915 txq->stopped = 1;
f078f209 1916 }
28d16708 1917 spin_unlock_bh(&txq->axq_lock);
f078f209 1918
44f1d26c
FF
1919 ath_tx_start_dma(sc, skb, txctl);
1920 return 0;
f078f209
LR
1921}
1922
e8324357
S
1923/*****************/
1924/* TX Completion */
1925/*****************/
528f0c6b 1926
e8324357 1927static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0f9dc298 1928 int tx_flags, struct ath_txq *txq)
528f0c6b 1929{
e8324357
S
1930 struct ieee80211_hw *hw = sc->hw;
1931 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1932 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1933 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1934 int q, padpos, padsize;
528f0c6b 1935
226afe68 1936 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1937
6b2c4032 1938 if (tx_flags & ATH_TX_BAR)
e8324357 1939 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1940
55797b1a 1941 if (!(tx_flags & ATH_TX_ERROR))
e8324357
S
1942 /* Frame was ACKed */
1943 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b 1944
4d91f9f3
BP
1945 padpos = ath9k_cmn_padpos(hdr->frame_control);
1946 padsize = padpos & 3;
 1947 if (padsize && skb->len > padpos + padsize) {
e8324357
S
1948 /*
1949 * Remove MAC header padding before giving the frame back to
1950 * mac80211.
1951 */
4d91f9f3 1952 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1953 skb_pull(skb, padsize);
1954 }
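	/*
	 * This undoes the header padding added in ath_tx_start(); see
	 * the worked example there (same ath9k_cmn_padpos() arithmetic).
	 */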
528f0c6b 1955
1b04b930
S
1956 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1957 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
226afe68
JP
1958 ath_dbg(common, ATH_DBG_PS,
1959 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
1960 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1961 PS_WAIT_FOR_CAB |
1962 PS_WAIT_FOR_PSPOLL_DATA |
1963 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1964 }
1965
7545daf4
FF
1966 q = skb_get_queue_mapping(skb);
1967 if (txq == sc->tx.txq_map[q]) {
1968 spin_lock_bh(&txq->axq_lock);
1969 if (WARN_ON(--txq->pending_frames < 0))
1970 txq->pending_frames = 0;
92460412 1971
7545daf4
FF
1972 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1973 ieee80211_wake_queue(sc->hw, q);
1974 txq->stopped = 0;
066dae93 1975 }
7545daf4 1976 spin_unlock_bh(&txq->axq_lock);
97923b14 1977 }
7545daf4
FF
1978
1979 ieee80211_tx_status(hw, skb);
e8324357 1980}
f078f209 1981
e8324357 1982static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1983 struct ath_txq *txq, struct list_head *bf_q,
1984 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1985{
e8324357 1986 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1987 unsigned long flags;
6b2c4032 1988 int tx_flags = 0;
f078f209 1989
e8324357 1990 if (sendbar)
6b2c4032 1991 tx_flags = ATH_TX_BAR;
f078f209 1992
55797b1a 1993 if (!txok)
6b2c4032 1994 tx_flags |= ATH_TX_ERROR;
f078f209 1995
c1739eb3 1996 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1997 bf->bf_buf_addr = 0;
9f42c2b6
FF
1998
1999 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
2000 if (time_after(jiffies,
2001 bf->bf_state.bfs_paprd_timestamp +
2002 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2003 dev_kfree_skb_any(skb);
78a18172 2004 else
ca369eb4 2005 complete(&sc->paprd_complete);
9f42c2b6 2006 } else {
55797b1a 2007 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
0f9dc298 2008 ath_tx_complete(sc, skb, tx_flags, txq);
9f42c2b6 2009 }
6cf9e995
BG
 2010 /* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
2011 * accidentally reference it later.
2012 */
2013 bf->bf_mpdu = NULL;
e8324357
S
2014
2015 /*
 2016 * Return the list of ath_bufs for this MPDU to the free queue.
2017 */
2018 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2019 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2020 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2021}
2022
0cdd5c60
FF
2023static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2024 struct ath_tx_status *ts, int nframes, int nbad,
2025 int txok, bool update_rc)
f078f209 2026{
a22be22a 2027 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2028 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2029 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 2030 struct ieee80211_hw *hw = sc->hw;
f0c255a0 2031 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 2032 u8 i, tx_rateindex;
f078f209 2033
95e4acb7 2034 if (txok)
db1a052b 2035 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2036
db1a052b 2037 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2038 WARN_ON(tx_rateindex >= hw->max_rates);
2039
db1a052b 2040 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 2041 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
ebd02287 2042 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
d969847c 2043 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2044
b572d033 2045 BUG_ON(nbad > nframes);
ebd02287 2046
b572d033
FF
2047 tx_info->status.ampdu_len = nframes;
2048 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287
BS
2049 }
2050
db1a052b 2051 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
493cf04f 2052 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0 && update_rc) {
f0c255a0
FF
2053 /*
 2054 * If an underrun error is seen, treat it as an excessive
 2055 * retry, but only if the max frame trigger level has been
 2056 * reached (2 KB for single stream, 4 KB for dual stream).
2057 * Adjust the long retry as if the frame was tried
2058 * hw->max_rate_tries times to affect how rate control updates
2059 * PER for the failed rate.
 2060 * When the bus is congested, penalizing this type of
 2061 * underrun should help the hardware actually transmit new
 2062 * frames successfully by eventually preferring slower rates.
2063 * This itself should also alleviate congestion on the bus.
2064 */
2065 if (ieee80211_is_data(hdr->frame_control) &&
2066 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2067 ATH9K_TX_DELIM_UNDERRUN)) &&
83860c59 2068 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
f0c255a0
FF
2069 tx_info->status.rates[tx_rateindex].count =
2070 hw->max_rate_tries;
f078f209 2071 }
8a92e2ee 2072
545750d3 2073 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2074 tx_info->status.rates[i].count = 0;
545750d3
FF
2075 tx_info->status.rates[i].idx = -1;
2076 }
8a92e2ee 2077
78c4653a 2078 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2079}
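/*
 * Example (a sketch): a frame ACKed on rate index 1 after one long
 * retry reports rates[1].count = ts_longretry + 1 = 2, while
 * rates[2..] are cleared (count 0, idx -1) so rate control ignores
 * them.
 */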
2080
fce041be
FF
2081static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2082 struct ath_tx_status *ts, struct ath_buf *bf,
2083 struct list_head *bf_head)
5479de6e
RM
2084 __releases(txq->axq_lock)
2085 __acquires(txq->axq_lock)
fce041be
FF
2086{
2087 int txok;
2088
2089 txq->axq_depth--;
2090 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2091 txq->axq_tx_inprogress = false;
2092 if (bf_is_ampdu_not_probing(bf))
2093 txq->axq_ampdu_depth--;
2094
2095 spin_unlock_bh(&txq->axq_lock);
2096
2097 if (!bf_isampdu(bf)) {
fce041be
FF
2098 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2099 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2100 } else
2101 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2102
2103 spin_lock_bh(&txq->axq_lock);
2104
2105 if (sc->sc_flags & SC_OP_TXAGGR)
2106 ath_txq_schedule(sc, txq);
2107}
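/*
 * Note: per the __releases/__acquires annotations, axq_lock is
 * dropped around the completion calls above and re-taken before
 * returning; callers must hold it on entry.
 */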
2108
e8324357 2109static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2110{
cbe61d8a 2111 struct ath_hw *ah = sc->sc_ah;
c46917bb 2112 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2113 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2114 struct list_head bf_head;
e8324357 2115 struct ath_desc *ds;
29bffa96 2116 struct ath_tx_status ts;
e8324357 2117 int status;
f078f209 2118
226afe68
JP
2119 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2120 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2121 txq->axq_link);
f078f209 2122
fce041be 2123 spin_lock_bh(&txq->axq_lock);
f078f209 2124 for (;;) {
236de514
FF
2125 if (work_pending(&sc->hw_reset_work))
2126 break;
2127
f078f209
LR
2128 if (list_empty(&txq->axq_q)) {
2129 txq->axq_link = NULL;
86271e46 2130 if (sc->sc_flags & SC_OP_TXAGGR)
082f6536 2131 ath_txq_schedule(sc, txq);
f078f209
LR
2132 break;
2133 }
f078f209
LR
2134 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2135
e8324357
S
2136 /*
 2137 * There is a race condition in which a BH gets scheduled
 2138 * after sw writes TxE and before hw re-loads the last
 2139 * descriptor to get the newly chained one.
2140 * Software must keep the last DONE descriptor as a
2141 * holding descriptor - software does so by marking
2142 * it with the STALE flag.
2143 */
2144 bf_held = NULL;
a119cc49 2145 if (bf->bf_stale) {
e8324357 2146 bf_held = bf;
fce041be 2147 if (list_is_last(&bf_held->list, &txq->axq_q))
e8324357 2148 break;
fce041be
FF
2149
2150 bf = list_entry(bf_held->list.next, struct ath_buf,
2151 list);
f078f209
LR
2152 }
2153
2154 lastbf = bf->bf_lastbf;
e8324357 2155 ds = lastbf->bf_desc;
f078f209 2156
29bffa96
FF
2157 memset(&ts, 0, sizeof(ts));
2158 status = ath9k_hw_txprocdesc(ah, ds, &ts);
fce041be 2159 if (status == -EINPROGRESS)
e8324357 2160 break;
fce041be 2161
2dac4fb9 2162 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2163
e8324357
S
2164 /*
 2165 * Remove the ath_bufs of the same transmit unit from txq,
 2166 * but leave the last descriptor behind as the holding
2167 * descriptor for hw.
2168 */
a119cc49 2169 lastbf->bf_stale = true;
e8324357 2170 INIT_LIST_HEAD(&bf_head);
e8324357
S
2171 if (!list_is_singular(&lastbf->list))
2172 list_cut_position(&bf_head,
2173 &txq->axq_q, lastbf->list.prev);
f078f209 2174
fce041be 2175 if (bf_held) {
0a8cea84 2176 list_del(&bf_held->list);
0a8cea84 2177 ath_tx_return_buffer(sc, bf_held);
e8324357 2178 }
f078f209 2179
fce041be 2180 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
8469cdef 2181 }
fce041be 2182 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2183}
2184
305fe47f 2185static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2186{
2187 struct ath_softc *sc = container_of(work, struct ath_softc,
2188 tx_complete_work.work);
2189 struct ath_txq *txq;
2190 int i;
2191 bool needreset = false;
60f2d1d5
BG
2192#ifdef CONFIG_ATH9K_DEBUGFS
2193 sc->tx_complete_poll_work_seen++;
2194#endif
164ace38
SB
2195
2196 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2197 if (ATH_TXQ_SETUP(sc, i)) {
2198 txq = &sc->tx.txq[i];
2199 spin_lock_bh(&txq->axq_lock);
2200 if (txq->axq_depth) {
2201 if (txq->axq_tx_inprogress) {
2202 needreset = true;
2203 spin_unlock_bh(&txq->axq_lock);
2204 break;
2205 } else {
2206 txq->axq_tx_inprogress = true;
2207 }
2208 }
2209 spin_unlock_bh(&txq->axq_lock);
2210 }
2211
2212 if (needreset) {
226afe68
JP
2213 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2214 "tx hung, resetting the chip\n");
236de514 2215 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
164ace38
SB
2216 }
2217
42935eca 2218 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2219 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2220}
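/*
 * Watchdog sketch (descriptive): a queue whose depth is still nonzero
 * with axq_tx_inprogress already set on the previous poll is treated
 * as hung and schedules hw_reset_work; otherwise the flag is armed
 * and checked again after ATH_TX_COMPLETE_POLL_INT ms.
 */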
2221
2222
f078f209 2223
e8324357 2224void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2225{
e8324357
S
2226 int i;
2227 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2228
e8324357 2229 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2230
e8324357
S
2231 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2232 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2233 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2234 }
2235}
2236
e5003249
VT
2237void ath_tx_edma_tasklet(struct ath_softc *sc)
2238{
fce041be 2239 struct ath_tx_status ts;
e5003249
VT
2240 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2241 struct ath_hw *ah = sc->sc_ah;
2242 struct ath_txq *txq;
2243 struct ath_buf *bf, *lastbf;
2244 struct list_head bf_head;
2245 int status;
e5003249
VT
2246
2247 for (;;) {
236de514
FF
2248 if (work_pending(&sc->hw_reset_work))
2249 break;
2250
fce041be 2251 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
e5003249
VT
2252 if (status == -EINPROGRESS)
2253 break;
2254 if (status == -EIO) {
226afe68
JP
2255 ath_dbg(common, ATH_DBG_XMIT,
2256 "Error processing tx status\n");
e5003249
VT
2257 break;
2258 }
2259
2260 /* Skip beacon completions */
fce041be 2261 if (ts.qid == sc->beacon.beaconq)
e5003249
VT
2262 continue;
2263
fce041be 2264 txq = &sc->tx.txq[ts.qid];
e5003249
VT
2265
2266 spin_lock_bh(&txq->axq_lock);
fce041be 2267
e5003249
VT
2268 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2269 spin_unlock_bh(&txq->axq_lock);
2270 return;
2271 }
2272
2273 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2274 struct ath_buf, list);
2275 lastbf = bf->bf_lastbf;
2276
2277 INIT_LIST_HEAD(&bf_head);
2278 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2279 &lastbf->list);
e5003249 2280
fce041be
FF
2281 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2282 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
e5003249 2283
fce041be
FF
2284 if (!list_empty(&txq->axq_q)) {
2285 struct list_head bf_q;
60f2d1d5 2286
fce041be
FF
2287 INIT_LIST_HEAD(&bf_q);
2288 txq->axq_link = NULL;
2289 list_splice_tail_init(&txq->axq_q, &bf_q);
2290 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2291 }
2292 }
86271e46 2293
fce041be 2294 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
e5003249
VT
2295 spin_unlock_bh(&txq->axq_lock);
2296 }
2297}
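/*
 * Requeue sketch (descriptive, not new behaviour): when a FIFO slot
 * drains completely, any frames still parked on axq_q are spliced
 * back in through ath_tx_txqaddbuf(..., internal = true), so the
 * depth counters are not incremented a second time.
 */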
2298
e8324357
S
2299/*****************/
2300/* Init, Cleanup */
2301/*****************/
f078f209 2302
5088c2f1
VT
2303static int ath_txstatus_setup(struct ath_softc *sc, int size)
2304{
2305 struct ath_descdma *dd = &sc->txsdma;
2306 u8 txs_len = sc->sc_ah->caps.txs_len;
2307
2308 dd->dd_desc_len = size * txs_len;
2309 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2310 &dd->dd_desc_paddr, GFP_KERNEL);
2311 if (!dd->dd_desc)
2312 return -ENOMEM;
2313
2314 return 0;
2315}
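/*
 * Sizing note: the status ring occupies size * txs_len bytes of
 * coherent DMA memory, with txs_len reported by the hardware caps.
 */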
2316
2317static int ath_tx_edma_init(struct ath_softc *sc)
2318{
2319 int err;
2320
2321 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2322 if (!err)
2323 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2324 sc->txsdma.dd_desc_paddr,
2325 ATH_TXSTATUS_RING_SIZE);
2326
2327 return err;
2328}
2329
2330static void ath_tx_edma_cleanup(struct ath_softc *sc)
2331{
2332 struct ath_descdma *dd = &sc->txsdma;
2333
2334 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2335 dd->dd_desc_paddr);
2336}
2337
e8324357 2338int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2339{
c46917bb 2340 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2341 int error = 0;
f078f209 2342
797fe5cb 2343 spin_lock_init(&sc->tx.txbuflock);
f078f209 2344
797fe5cb 2345 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2346 "tx", nbufs, 1, 1);
797fe5cb 2347 if (error != 0) {
3800276a
JP
2348 ath_err(common,
2349 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2350 goto err;
2351 }
f078f209 2352
797fe5cb 2353 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2354 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2355 if (error != 0) {
3800276a
JP
2356 ath_err(common,
2357 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2358 goto err;
2359 }
f078f209 2360
164ace38
SB
2361 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2362
5088c2f1
VT
2363 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2364 error = ath_tx_edma_init(sc);
2365 if (error)
2366 goto err;
2367 }
2368
797fe5cb 2369err:
e8324357
S
2370 if (error != 0)
2371 ath_tx_cleanup(sc);
f078f209 2372
e8324357 2373 return error;
f078f209
LR
2374}
2375
797fe5cb 2376void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2377{
2378 if (sc->beacon.bdma.dd_desc_len != 0)
2379 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2380
2381 if (sc->tx.txdma.dd_desc_len != 0)
2382 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2383
2384 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2385 ath_tx_edma_cleanup(sc);
e8324357 2386}
f078f209
LR
2387
2388void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2389{
c5170163
S
2390 struct ath_atx_tid *tid;
2391 struct ath_atx_ac *ac;
2392 int tidno, acno;
f078f209 2393
8ee5afbc 2394 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2395 tidno < WME_NUM_TID;
2396 tidno++, tid++) {
2397 tid->an = an;
2398 tid->tidno = tidno;
2399 tid->seq_start = tid->seq_next = 0;
2400 tid->baw_size = WME_MAX_BA;
2401 tid->baw_head = tid->baw_tail = 0;
2402 tid->sched = false;
e8324357 2403 tid->paused = false;
a37c2c79 2404 tid->state &= ~AGGR_CLEANUP;
56dc6336 2405 __skb_queue_head_init(&tid->buf_q);
c5170163 2406 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2407 tid->ac = &an->ac[acno];
a37c2c79
S
2408 tid->state &= ~AGGR_ADDBA_COMPLETE;
2409 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2410 }
f078f209 2411
8ee5afbc 2412 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2413 acno < WME_NUM_AC; acno++, ac++) {
2414 ac->sched = false;
066dae93 2415 ac->txq = sc->tx.txq_map[acno];
c5170163 2416 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2417 }
2418}
2419
b5aa9bf9 2420void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2421{
2b40994c
FF
2422 struct ath_atx_ac *ac;
2423 struct ath_atx_tid *tid;
f078f209 2424 struct ath_txq *txq;
066dae93 2425 int tidno;
e8324357 2426
2b40994c
FF
2427 for (tidno = 0, tid = &an->tid[tidno];
2428 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2429
2b40994c 2430 ac = tid->ac;
066dae93 2431 txq = ac->txq;
f078f209 2432
2b40994c
FF
2433 spin_lock_bh(&txq->axq_lock);
2434
2435 if (tid->sched) {
2436 list_del(&tid->list);
2437 tid->sched = false;
2438 }
2439
2440 if (ac->sched) {
2441 list_del(&ac->list);
2442 tid->ac->sched = false;
f078f209 2443 }
2b40994c
FF
2444
2445 ath_tid_drain(sc, txq, tid);
2446 tid->state &= ~AGGR_ADDBA_COMPLETE;
2447 tid->state &= ~AGGR_CLEANUP;
2448
2449 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2450 }
2451}