mac80211: redefine usage of the mac80211 workqueue
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / wireless / ath / ar9170 / main.c
CommitLineData
e9348cdd
CL
1/*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
e9348cdd
CL
40#include <linux/init.h>
41#include <linux/module.h>
42#include <linux/etherdevice.h>
43#include <net/mac80211.h>
44#include "ar9170.h"
45#include "hw.h"
46#include "cmd.h"
47
48static int modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
e9348cdd 51
acbadf01
CL
52static int modparam_ht;
53module_param_named(ht, modparam_ht, bool, S_IRUGO);
54MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
55
e9348cdd
CL
56#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
57 .bitrate = (_bitrate), \
58 .flags = (_flags), \
59 .hw_value = (_hw_rate) | (_txpidx) << 4, \
60}
61
62static struct ieee80211_rate __ar9170_ratetable[] = {
63 RATE(10, 0, 0, 0),
64 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
65 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
66 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
67 RATE(60, 0xb, 0, 0),
68 RATE(90, 0xf, 0, 0),
69 RATE(120, 0xa, 0, 0),
70 RATE(180, 0xe, 0, 0),
71 RATE(240, 0x9, 0, 0),
72 RATE(360, 0xd, 1, 0),
73 RATE(480, 0x8, 2, 0),
74 RATE(540, 0xc, 3, 0),
75};
76#undef RATE
77
78#define ar9170_g_ratetable (__ar9170_ratetable + 0)
79#define ar9170_g_ratetable_size 12
80#define ar9170_a_ratetable (__ar9170_ratetable + 4)
81#define ar9170_a_ratetable_size 8
82
83/*
84 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
85 * array in phy.c so that we don't have to do frequency lookups!
86 */
87#define CHAN(_freq, _idx) { \
88 .center_freq = (_freq), \
89 .hw_value = (_idx), \
90 .max_power = 18, /* XXX */ \
91}
92
93static struct ieee80211_channel ar9170_2ghz_chantable[] = {
94 CHAN(2412, 0),
95 CHAN(2417, 1),
96 CHAN(2422, 2),
97 CHAN(2427, 3),
98 CHAN(2432, 4),
99 CHAN(2437, 5),
100 CHAN(2442, 6),
101 CHAN(2447, 7),
102 CHAN(2452, 8),
103 CHAN(2457, 9),
104 CHAN(2462, 10),
105 CHAN(2467, 11),
106 CHAN(2472, 12),
107 CHAN(2484, 13),
108};
109
110static struct ieee80211_channel ar9170_5ghz_chantable[] = {
111 CHAN(4920, 14),
112 CHAN(4940, 15),
113 CHAN(4960, 16),
114 CHAN(4980, 17),
115 CHAN(5040, 18),
116 CHAN(5060, 19),
117 CHAN(5080, 20),
118 CHAN(5180, 21),
119 CHAN(5200, 22),
120 CHAN(5220, 23),
121 CHAN(5240, 24),
122 CHAN(5260, 25),
123 CHAN(5280, 26),
124 CHAN(5300, 27),
125 CHAN(5320, 28),
126 CHAN(5500, 29),
127 CHAN(5520, 30),
128 CHAN(5540, 31),
129 CHAN(5560, 32),
130 CHAN(5580, 33),
131 CHAN(5600, 34),
132 CHAN(5620, 35),
133 CHAN(5640, 36),
134 CHAN(5660, 37),
135 CHAN(5680, 38),
136 CHAN(5700, 39),
137 CHAN(5745, 40),
138 CHAN(5765, 41),
139 CHAN(5785, 42),
140 CHAN(5805, 43),
141 CHAN(5825, 44),
142 CHAN(5170, 45),
143 CHAN(5190, 46),
144 CHAN(5210, 47),
145 CHAN(5230, 48),
146};
147#undef CHAN
148
9e52b062
JB
149#define AR9170_HT_CAP \
150{ \
151 .ht_supported = true, \
152 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
9e52b062
JB
153 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
154 IEEE80211_HT_CAP_SGI_40 | \
acbadf01 155 IEEE80211_HT_CAP_GRN_FLD | \
9e52b062
JB
156 IEEE80211_HT_CAP_DSSSCCK40 | \
157 IEEE80211_HT_CAP_SM_PS, \
083c4687
CL
158 .ampdu_factor = 3, \
159 .ampdu_density = 6, \
9e52b062 160 .mcs = { \
acbadf01
CL
161 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
162 .rx_highest = cpu_to_le16(300), \
163 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
9e52b062
JB
164 }, \
165}
166
e9348cdd
CL
167static struct ieee80211_supported_band ar9170_band_2GHz = {
168 .channels = ar9170_2ghz_chantable,
169 .n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
170 .bitrates = ar9170_g_ratetable,
171 .n_bitrates = ar9170_g_ratetable_size,
9e52b062
JB
172 .ht_cap = AR9170_HT_CAP,
173};
174
175static struct ieee80211_supported_band ar9170_band_5GHz = {
176 .channels = ar9170_5ghz_chantable,
177 .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
178 .bitrates = ar9170_a_ratetable,
179 .n_bitrates = ar9170_a_ratetable_size,
180 .ht_cap = AR9170_HT_CAP,
e9348cdd
CL
181};
182
9b9c5aae 183static void ar9170_tx(struct ar9170 *ar);
acbadf01 184static bool ar9170_tx_ampdu(struct ar9170 *ar);
e9348cdd 185
acbadf01
CL
186static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
187{
188 return le16_to_cpu(hdr->seq_ctrl) >> 4;
189}
190
191static inline u16 ar9170_get_seq(struct sk_buff *skb)
192{
193 struct ar9170_tx_control *txc = (void *) skb->data;
194 return ar9170_get_seq_h((void *) txc->frame_data);
195}
196
197static inline u16 ar9170_get_tid(struct sk_buff *skb)
198{
199 struct ar9170_tx_control *txc = (void *) skb->data;
200 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
201
202 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
203}
204
205#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
206#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb)))
207
208#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
e9348cdd
CL
209static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
210{
211 struct ar9170_tx_control *txc = (void *) skb->data;
9b9c5aae
CL
212 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
213 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
214 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
e9348cdd 215
acbadf01 216 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
9b9c5aae 217 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
e9348cdd 218 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
acbadf01 219 ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
9b9c5aae
CL
220 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
221 jiffies_to_msecs(arinfo->timeout - jiffies));
e9348cdd
CL
222}
223
9b9c5aae
CL
224static void __ar9170_dump_txqueue(struct ar9170 *ar,
225 struct sk_buff_head *queue)
e9348cdd
CL
226{
227 struct sk_buff *skb;
228 int i = 0;
229
230 printk(KERN_DEBUG "---[ cut here ]---\n");
9b9c5aae 231 printk(KERN_DEBUG "%s: %d entries in queue.\n",
e9348cdd
CL
232 wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
233
234 skb_queue_walk(queue, skb) {
9b9c5aae 235 printk(KERN_DEBUG "index:%d => \n", i++);
e9348cdd
CL
236 ar9170_print_txheader(ar, skb);
237 }
9b9c5aae
CL
238 if (i != skb_queue_len(queue))
239 printk(KERN_DEBUG "WARNING: queue frame counter "
240 "mismatch %d != %d\n", skb_queue_len(queue), i);
e9348cdd
CL
241 printk(KERN_DEBUG "---[ end ]---\n");
242}
acbadf01 243#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
e9348cdd 244
acbadf01 245#ifdef AR9170_QUEUE_DEBUG
9b9c5aae
CL
246static void ar9170_dump_txqueue(struct ar9170 *ar,
247 struct sk_buff_head *queue)
248{
249 unsigned long flags;
250
251 spin_lock_irqsave(&queue->lock, flags);
252 __ar9170_dump_txqueue(ar, queue);
253 spin_unlock_irqrestore(&queue->lock, flags);
254}
acbadf01 255#endif /* AR9170_QUEUE_DEBUG */
9b9c5aae 256
acbadf01 257#ifdef AR9170_QUEUE_STOP_DEBUG
9b9c5aae
CL
258static void __ar9170_dump_txstats(struct ar9170 *ar)
259{
260 int i;
261
262 printk(KERN_DEBUG "%s: QoS queue stats\n",
263 wiphy_name(ar->hw->wiphy));
264
265 for (i = 0; i < __AR9170_NUM_TXQ; i++)
acbadf01
CL
266 printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d "
267 " stopped:%d\n", wiphy_name(ar->hw->wiphy), i,
268 ar->tx_stats[i].limit, ar->tx_stats[i].len,
269 skb_queue_len(&ar->tx_status[i]),
270 ieee80211_queue_stopped(ar->hw, i));
9b9c5aae 271}
acbadf01 272#endif /* AR9170_QUEUE_STOP_DEBUG */
9b9c5aae 273
acbadf01
CL
274#ifdef AR9170_TXAGG_DEBUG
275static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
e9348cdd 276{
e9348cdd
CL
277 unsigned long flags;
278
acbadf01
CL
279 spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
280 printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
281 wiphy_name(ar->hw->wiphy));
282 __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
283 spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
9b9c5aae 284}
acbadf01
CL
285
286#endif /* AR9170_TXAGG_DEBUG */
9b9c5aae
CL
287
288/* caller must guarantee exclusive access for _bin_ queue. */
289static void ar9170_recycle_expired(struct ar9170 *ar,
290 struct sk_buff_head *queue,
291 struct sk_buff_head *bin)
292{
293 struct sk_buff *skb, *old = NULL;
294 unsigned long flags;
295
296 spin_lock_irqsave(&queue->lock, flags);
297 while ((skb = skb_peek(queue))) {
298 struct ieee80211_tx_info *txinfo;
299 struct ar9170_tx_info *arinfo;
300
301 txinfo = IEEE80211_SKB_CB(skb);
302 arinfo = (void *) txinfo->rate_driver_data;
303
304 if (time_is_before_jiffies(arinfo->timeout)) {
305#ifdef AR9170_QUEUE_DEBUG
306 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
307 "recycle \n", wiphy_name(ar->hw->wiphy),
308 jiffies, arinfo->timeout);
309 ar9170_print_txheader(ar, skb);
310#endif /* AR9170_QUEUE_DEBUG */
311 __skb_unlink(skb, queue);
312 __skb_queue_tail(bin, skb);
313 } else {
314 break;
315 }
316
317 if (unlikely(old == skb)) {
318 /* bail out - queue is shot. */
319
320 WARN_ON(1);
321 break;
322 }
323 old = skb;
324 }
325 spin_unlock_irqrestore(&queue->lock, flags);
326}
327
328static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
329 u16 tx_status)
330{
331 struct ieee80211_tx_info *txinfo;
332 unsigned int retries = 0;
e9348cdd
CL
333
334 txinfo = IEEE80211_SKB_CB(skb);
335 ieee80211_tx_info_clear_status(txinfo);
336
337 switch (tx_status) {
338 case AR9170_TX_STATUS_RETRY:
339 retries = 2;
340 case AR9170_TX_STATUS_COMPLETE:
341 txinfo->flags |= IEEE80211_TX_STAT_ACK;
342 break;
343
344 case AR9170_TX_STATUS_FAILED:
345 retries = ar->hw->conf.long_frame_max_tx_count;
346 break;
347
348 default:
349 printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
350 wiphy_name(ar->hw->wiphy), tx_status);
351 break;
352 }
353
9b9c5aae 354 txinfo->status.rates[0].count = retries + 1;
e9348cdd
CL
355 skb_pull(skb, sizeof(struct ar9170_tx_control));
356 ieee80211_tx_status_irqsafe(ar->hw, skb);
357}
e9348cdd 358
acbadf01
CL
359static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
360{
361 struct sk_buff_head success;
362 struct sk_buff *skb;
363 unsigned int i;
364 unsigned long queue_bitmap = 0;
365
366 skb_queue_head_init(&success);
367
368 while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
369 __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
370
371 ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
372
373#ifdef AR9170_TXAGG_DEBUG
374 printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
375 wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
376 __ar9170_dump_txqueue(ar, &success);
377#endif /* AR9170_TXAGG_DEBUG */
378
379 while ((skb = __skb_dequeue(&success))) {
380 struct ieee80211_tx_info *txinfo;
381
382 queue_bitmap |= BIT(skb_get_queue_mapping(skb));
383
384 txinfo = IEEE80211_SKB_CB(skb);
385 ieee80211_tx_info_clear_status(txinfo);
386
387 txinfo->flags |= IEEE80211_TX_STAT_ACK;
388 txinfo->status.rates[0].count = 1;
389
390 skb_pull(skb, sizeof(struct ar9170_tx_control));
391 ieee80211_tx_status_irqsafe(ar->hw, skb);
392 }
393
394 for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
395#ifdef AR9170_QUEUE_STOP_DEBUG
396 printk(KERN_DEBUG "%s: wake queue %d\n",
397 wiphy_name(ar->hw->wiphy), i);
398 __ar9170_dump_txstats(ar);
399#endif /* AR9170_QUEUE_STOP_DEBUG */
400 ieee80211_wake_queue(ar->hw, i);
401 }
402
403 if (queue_bitmap)
404 ar9170_tx(ar);
405}
406
407static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
408{
409 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
410 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
411
412 arinfo->timeout = jiffies +
413 msecs_to_jiffies(AR9170_BA_TIMEOUT);
414
415 skb_queue_tail(&ar->tx_status_ampdu, skb);
416 ar9170_tx_fake_ampdu_status(ar);
417 ar->tx_ampdu_pending--;
418
419 if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending)
420 ar9170_tx_ampdu(ar);
421}
422
9b9c5aae 423void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
e9348cdd 424{
9b9c5aae
CL
425 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
426 struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
427 unsigned int queue = skb_get_queue_mapping(skb);
e9348cdd 428 unsigned long flags;
e9348cdd 429
9b9c5aae
CL
430 spin_lock_irqsave(&ar->tx_stats_lock, flags);
431 ar->tx_stats[queue].len--;
e9348cdd 432
9b9c5aae
CL
433 if (skb_queue_empty(&ar->tx_pending[queue])) {
434#ifdef AR9170_QUEUE_STOP_DEBUG
435 printk(KERN_DEBUG "%s: wake queue %d\n",
436 wiphy_name(ar->hw->wiphy), queue);
437 __ar9170_dump_txstats(ar);
438#endif /* AR9170_QUEUE_STOP_DEBUG */
439 ieee80211_wake_queue(ar->hw, queue);
440 }
441 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
e9348cdd 442
9b9c5aae 443 if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
acbadf01 444 ar9170_tx_ampdu_callback(ar, skb);
9b9c5aae
CL
445 } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
446 arinfo->timeout = jiffies +
447 msecs_to_jiffies(AR9170_TX_TIMEOUT);
448
449 skb_queue_tail(&ar->tx_status[queue], skb);
450 } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
451 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
452 } else {
453#ifdef AR9170_QUEUE_DEBUG
454 printk(KERN_DEBUG "%s: unsupported frame flags!\n",
455 wiphy_name(ar->hw->wiphy));
456 ar9170_print_txheader(ar, skb);
457#endif /* AR9170_QUEUE_DEBUG */
458 dev_kfree_skb_any(skb);
459 }
460
461 if (!ar->tx_stats[queue].len &&
462 !skb_queue_empty(&ar->tx_pending[queue])) {
463 ar9170_tx(ar);
e9348cdd 464 }
e9348cdd
CL
465}
466
9b9c5aae
CL
467static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
468 const u8 *mac,
469 struct sk_buff_head *queue,
470 const u32 rate)
e9348cdd 471{
9b9c5aae 472 unsigned long flags;
e9348cdd
CL
473 struct sk_buff *skb;
474
475 /*
476 * Unfortunately, the firmware does not tell to which (queued) frame
477 * this transmission status report belongs to.
478 *
479 * So we have to make risky guesses - with the scarce information
480 * the firmware provided (-> destination MAC, and phy_control) -
481 * and hope that we picked the right one...
482 */
e9348cdd 483
9b9c5aae
CL
484 spin_lock_irqsave(&queue->lock, flags);
485 skb_queue_walk(queue, skb) {
486 struct ar9170_tx_control *txc = (void *) skb->data;
487 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
488 u32 r;
489
490 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
e9348cdd 491#ifdef AR9170_QUEUE_DEBUG
9b9c5aae
CL
492 printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
493 wiphy_name(ar->hw->wiphy), mac,
494 ieee80211_get_DA(hdr));
495 ar9170_print_txheader(ar, skb);
496#endif /* AR9170_QUEUE_DEBUG */
497 continue;
498 }
499
500 r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
501 AR9170_TX_PHY_MCS_SHIFT;
502
503 if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
504#ifdef AR9170_QUEUE_DEBUG
505 printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
506 wiphy_name(ar->hw->wiphy), rate, r);
507 ar9170_print_txheader(ar, skb);
508#endif /* AR9170_QUEUE_DEBUG */
509 continue;
510 }
511
512 __skb_unlink(skb, queue);
513 spin_unlock_irqrestore(&queue->lock, flags);
514 return skb;
e9348cdd 515 }
9b9c5aae
CL
516
517#ifdef AR9170_QUEUE_DEBUG
518 printk(KERN_ERR "%s: ESS:[%pM] does not have any "
519 "outstanding frames in queue.\n",
520 wiphy_name(ar->hw->wiphy), mac);
521 __ar9170_dump_txqueue(ar, queue);
e9348cdd 522#endif /* AR9170_QUEUE_DEBUG */
9b9c5aae
CL
523 spin_unlock_irqrestore(&queue->lock, flags);
524
525 return NULL;
e9348cdd
CL
526}
527
acbadf01
CL
528static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
529{
530 struct sk_buff *skb;
531 struct ieee80211_tx_info *txinfo;
532
533 while (count) {
534 skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
535 if (!skb)
536 break;
537
538 txinfo = IEEE80211_SKB_CB(skb);
539 ieee80211_tx_info_clear_status(txinfo);
540
541 /* FIXME: maybe more ? */
542 txinfo->status.rates[0].count = 1;
543
544 skb_pull(skb, sizeof(struct ar9170_tx_control));
545 ieee80211_tx_status_irqsafe(ar->hw, skb);
546 count--;
547 }
548
549#ifdef AR9170_TXAGG_DEBUG
550 if (count) {
551 printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
552 "suitable frames left in tx_status queue.\n",
553 wiphy_name(ar->hw->wiphy), count);
554
555 ar9170_dump_tx_status_ampdu(ar);
556 }
557#endif /* AR9170_TXAGG_DEBUG */
558}
559
e9348cdd 560/*
9b9c5aae
CL
561 * This worker tries to keeps an maintain tx_status queues.
562 * So we can guarantee that incoming tx_status reports are
563 * actually for a pending frame.
e9348cdd
CL
564 */
565
9b9c5aae 566static void ar9170_tx_janitor(struct work_struct *work)
e9348cdd
CL
567{
568 struct ar9170 *ar = container_of(work, struct ar9170,
9b9c5aae
CL
569 tx_janitor.work);
570 struct sk_buff_head waste;
571 unsigned int i;
572 bool resched = false;
e9348cdd 573
4a48e2a4
CL
574 if (unlikely(!IS_STARTED(ar)))
575 return ;
576
9b9c5aae
CL
577 skb_queue_head_init(&waste);
578
579 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
e9348cdd 580#ifdef AR9170_QUEUE_DEBUG
9b9c5aae
CL
581 printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
582 wiphy_name(ar->hw->wiphy), i);
583 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
584 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
e9348cdd 585#endif /* AR9170_QUEUE_DEBUG */
e9348cdd 586
9b9c5aae
CL
587 ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
588 ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
589 skb_queue_purge(&waste);
e9348cdd 590
9b9c5aae
CL
591 if (!skb_queue_empty(&ar->tx_status[i]) ||
592 !skb_queue_empty(&ar->tx_pending[i]))
593 resched = true;
e9348cdd
CL
594 }
595
acbadf01
CL
596 ar9170_tx_fake_ampdu_status(ar);
597
42935eca
LR
598 if (!resched)
599 return;
600
601 ieee80211_queue_delayed_work(ar->hw,
602 &ar->tx_janitor,
603 msecs_to_jiffies(AR9170_JANITOR_DELAY));
e9348cdd
CL
604}
605
66d00813 606void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
e9348cdd
CL
607{
608 struct ar9170_cmd_response *cmd = (void *) buf;
609
610 if ((cmd->type & 0xc0) != 0xc0) {
611 ar->callback_cmd(ar, len, buf);
612 return;
613 }
614
615 /* hardware event handlers */
616 switch (cmd->type) {
617 case 0xc1: {
618 /*
619 * TX status notification:
620 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
621 *
622 * XX always 81
623 * YY always 00
624 * M1-M6 is the MAC address
625 * R1-R4 is the transmit rate
626 * S1-S2 is the transmit status
627 */
628
629 struct sk_buff *skb;
9b9c5aae
CL
630 u32 phy = le32_to_cpu(cmd->tx_status.rate);
631 u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
632 AR9170_TX_PHY_QOS_SHIFT;
633#ifdef AR9170_QUEUE_DEBUG
634 printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
635 wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
636#endif /* AR9170_QUEUE_DEBUG */
e9348cdd 637
9b9c5aae
CL
638 skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
639 &ar->tx_status[q],
640 AR9170_TX_INVALID_RATE);
e9348cdd
CL
641 if (unlikely(!skb))
642 return ;
643
9b9c5aae 644 ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
e9348cdd
CL
645 break;
646 }
647
648 case 0xc0:
649 /*
650 * pre-TBTT event
651 */
652 if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
42935eca 653 ieee80211_queue_work(ar->hw, &ar->beacon_work);
e9348cdd
CL
654 break;
655
656 case 0xc2:
657 /*
658 * (IBSS) beacon send notification
659 * bytes: 04 c2 XX YY B4 B3 B2 B1
660 *
661 * XX always 80
662 * YY always 00
663 * B1-B4 "should" be the number of send out beacons.
664 */
665 break;
666
667 case 0xc3:
668 /* End of Atim Window */
669 break;
670
671 case 0xc4:
acbadf01
CL
672 /* BlockACK bitmap */
673 break;
674
e9348cdd
CL
675 case 0xc5:
676 /* BlockACK events */
acbadf01
CL
677 ar9170_handle_block_ack(ar,
678 le16_to_cpu(cmd->ba_fail_cnt.failed),
679 le16_to_cpu(cmd->ba_fail_cnt.rate));
680 ar9170_tx_fake_ampdu_status(ar);
e9348cdd
CL
681 break;
682
683 case 0xc6:
684 /* Watchdog Interrupt */
685 break;
686
687 case 0xc9:
688 /* retransmission issue / SIFS/EIFS collision ?! */
689 break;
690
2543a0c4
JB
691 /* firmware debug */
692 case 0xca:
693 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
694 break;
695 case 0xcb:
696 len -= 4;
697
698 switch (len) {
699 case 1:
700 printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
701 *((char *)buf + 4));
702 break;
703 case 2:
704 printk(KERN_DEBUG "ar9170 FW: u8: %#.4x\n",
705 le16_to_cpup((__le16 *)((char *)buf + 4)));
706 break;
707 case 4:
708 printk(KERN_DEBUG "ar9170 FW: u8: %#.8x\n",
709 le32_to_cpup((__le32 *)((char *)buf + 4)));
710 break;
711 case 8:
712 printk(KERN_DEBUG "ar9170 FW: u8: %#.16lx\n",
713 (unsigned long)le64_to_cpup(
714 (__le64 *)((char *)buf + 4)));
715 break;
716 }
717 break;
718 case 0xcc:
719 print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
720 (char *)buf + 4, len - 4);
721 break;
722
e9348cdd
CL
723 default:
724 printk(KERN_INFO "received unhandled event %x\n", cmd->type);
725 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
726 break;
727 }
728}
729
cca84799 730static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
e9348cdd 731{
cca84799
CL
732 memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
733 ar->rx_mpdu.has_plcp = false;
734}
e9348cdd 735
9b9c5aae 736int ar9170_nag_limiter(struct ar9170 *ar)
cca84799
CL
737{
738 bool print_message;
739
740 /*
741 * we expect all sorts of errors in promiscuous mode.
742 * don't bother with it, it's OK!
743 */
744 if (ar->sniffer_enabled)
745 return false;
746
747 /*
748 * only go for frequent errors! The hardware tends to
749 * do some stupid thing once in a while under load, in
750 * noisy environments or just for fun!
751 */
752 if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
753 print_message = true;
754 else
755 print_message = false;
756
757 /* reset threshold for "once in a while" */
758 ar->bad_hw_nagger = jiffies + HZ / 4;
759 return print_message;
760}
761
762static int ar9170_rx_mac_status(struct ar9170 *ar,
763 struct ar9170_rx_head *head,
764 struct ar9170_rx_macstatus *mac,
765 struct ieee80211_rx_status *status)
766{
767 u8 error, decrypt;
e9348cdd 768
e9348cdd 769 BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
cca84799 770 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
e9348cdd 771
cca84799
CL
772 error = mac->error;
773 if (error & AR9170_RX_ERROR_MMIC) {
774 status->flag |= RX_FLAG_MMIC_ERROR;
775 error &= ~AR9170_RX_ERROR_MMIC;
776 }
e9348cdd 777
cca84799
CL
778 if (error & AR9170_RX_ERROR_PLCP) {
779 status->flag |= RX_FLAG_FAILED_PLCP_CRC;
780 error &= ~AR9170_RX_ERROR_PLCP;
e9348cdd 781
cca84799
CL
782 if (!(ar->filter_state & FIF_PLCPFAIL))
783 return -EINVAL;
784 }
e9348cdd 785
cca84799
CL
786 if (error & AR9170_RX_ERROR_FCS) {
787 status->flag |= RX_FLAG_FAILED_FCS_CRC;
788 error &= ~AR9170_RX_ERROR_FCS;
e9348cdd 789
cca84799
CL
790 if (!(ar->filter_state & FIF_FCSFAIL))
791 return -EINVAL;
792 }
793
794 decrypt = ar9170_get_decrypt_type(mac);
795 if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
796 decrypt != AR9170_ENC_ALG_NONE)
797 status->flag |= RX_FLAG_DECRYPTED;
e9348cdd 798
cca84799
CL
799 /* ignore wrong RA errors */
800 error &= ~AR9170_RX_ERROR_WRONG_RA;
e9348cdd 801
cca84799
CL
802 if (error & AR9170_RX_ERROR_DECRYPT) {
803 error &= ~AR9170_RX_ERROR_DECRYPT;
804 /*
805 * Rx decryption is done in place,
806 * the original data is lost anyway.
807 */
808
809 return -EINVAL;
810 }
811
812 /* drop any other error frames */
813 if (unlikely(error)) {
814 /* TODO: update netdevice's RX dropped/errors statistics */
815
816 if (ar9170_nag_limiter(ar))
817 printk(KERN_DEBUG "%s: received frame with "
818 "suspicious error code (%#x).\n",
819 wiphy_name(ar->hw->wiphy), error);
820
821 return -EINVAL;
822 }
823
824 status->band = ar->channel->band;
825 status->freq = ar->channel->center_freq;
826
827 switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
e9348cdd 828 case AR9170_RX_STATUS_MODULATION_CCK:
cca84799
CL
829 if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
830 status->flag |= RX_FLAG_SHORTPRE;
e9348cdd
CL
831 switch (head->plcp[0]) {
832 case 0x0a:
cca84799 833 status->rate_idx = 0;
e9348cdd
CL
834 break;
835 case 0x14:
cca84799 836 status->rate_idx = 1;
e9348cdd
CL
837 break;
838 case 0x37:
cca84799 839 status->rate_idx = 2;
e9348cdd
CL
840 break;
841 case 0x6e:
cca84799 842 status->rate_idx = 3;
e9348cdd
CL
843 break;
844 default:
cca84799 845 if (ar9170_nag_limiter(ar))
e9348cdd
CL
846 printk(KERN_ERR "%s: invalid plcp cck rate "
847 "(%x).\n", wiphy_name(ar->hw->wiphy),
848 head->plcp[0]);
cca84799 849 return -EINVAL;
e9348cdd
CL
850 }
851 break;
cca84799 852
e9348cdd 853 case AR9170_RX_STATUS_MODULATION_OFDM:
cca84799
CL
854 switch (head->plcp[0] & 0xf) {
855 case 0xb:
856 status->rate_idx = 0;
e9348cdd 857 break;
cca84799
CL
858 case 0xf:
859 status->rate_idx = 1;
e9348cdd 860 break;
cca84799
CL
861 case 0xa:
862 status->rate_idx = 2;
e9348cdd 863 break;
cca84799
CL
864 case 0xe:
865 status->rate_idx = 3;
e9348cdd
CL
866 break;
867 case 0x9:
cca84799 868 status->rate_idx = 4;
e9348cdd 869 break;
cca84799
CL
870 case 0xd:
871 status->rate_idx = 5;
e9348cdd
CL
872 break;
873 case 0x8:
cca84799 874 status->rate_idx = 6;
e9348cdd 875 break;
cca84799
CL
876 case 0xc:
877 status->rate_idx = 7;
e9348cdd
CL
878 break;
879 default:
cca84799 880 if (ar9170_nag_limiter(ar))
e9348cdd
CL
881 printk(KERN_ERR "%s: invalid plcp ofdm rate "
882 "(%x).\n", wiphy_name(ar->hw->wiphy),
883 head->plcp[0]);
cca84799 884 return -EINVAL;
e9348cdd 885 }
cca84799
CL
886 if (status->band == IEEE80211_BAND_2GHZ)
887 status->rate_idx += 4;
e9348cdd 888 break;
cca84799 889
e9348cdd 890 case AR9170_RX_STATUS_MODULATION_HT:
cca84799
CL
891 if (head->plcp[3] & 0x80)
892 status->flag |= RX_FLAG_40MHZ;
893 if (head->plcp[6] & 0x80)
894 status->flag |= RX_FLAG_SHORT_GI;
895
896 status->rate_idx = clamp(0, 75, head->plcp[6] & 0x7f);
897 status->flag |= RX_FLAG_HT;
898 break;
899
e9348cdd
CL
900 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
901 /* XXX */
cca84799 902 if (ar9170_nag_limiter(ar))
e9348cdd
CL
903 printk(KERN_ERR "%s: invalid modulation\n",
904 wiphy_name(ar->hw->wiphy));
cca84799 905 return -EINVAL;
e9348cdd
CL
906 }
907
cca84799
CL
908 return 0;
909}
e9348cdd 910
cca84799
CL
911static void ar9170_rx_phy_status(struct ar9170 *ar,
912 struct ar9170_rx_phystatus *phy,
913 struct ieee80211_rx_status *status)
914{
915 int i;
e9348cdd 916
cca84799
CL
917 BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
918
919 for (i = 0; i < 3; i++)
920 if (phy->rssi[i] != 0x80)
921 status->antenna |= BIT(i);
922
923 /* post-process RSSI */
924 for (i = 0; i < 7; i++)
925 if (phy->rssi[i] & 0x80)
926 phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
927
928 /* TODO: we could do something with phy_errors */
929 status->signal = ar->noise[0] + phy->rssi_combined;
930 status->noise = ar->noise[0];
931}
932
933static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
934{
935 struct sk_buff *skb;
936 int reserved = 0;
937 struct ieee80211_hdr *hdr = (void *) buf;
938
939 if (ieee80211_is_data_qos(hdr->frame_control)) {
940 u8 *qc = ieee80211_get_qos_ctl(hdr);
941 reserved += NET_IP_ALIGN;
942
943 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
944 reserved += NET_IP_ALIGN;
e9348cdd
CL
945 }
946
cca84799
CL
947 if (ieee80211_has_a4(hdr->frame_control))
948 reserved += NET_IP_ALIGN;
949
950 reserved = 32 + (reserved & NET_IP_ALIGN);
951
952 skb = dev_alloc_skb(len + reserved);
953 if (likely(skb)) {
954 skb_reserve(skb, reserved);
955 memcpy(skb_put(skb, len), buf, len);
e9348cdd
CL
956 }
957
cca84799
CL
958 return skb;
959}
e9348cdd 960
cca84799
CL
961/*
962 * If the frame alignment is right (or the kernel has
963 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
964 * is only a single MPDU in the USB frame, then we could
965 * submit to mac80211 the SKB directly. However, since
966 * there may be multiple packets in one SKB in stream
967 * mode, and we need to observe the proper ordering,
968 * this is non-trivial.
969 */
e9348cdd 970
cca84799
CL
971static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
972{
973 struct ar9170_rx_head *head;
974 struct ar9170_rx_macstatus *mac;
975 struct ar9170_rx_phystatus *phy = NULL;
976 struct ieee80211_rx_status status;
977 struct sk_buff *skb;
978 int mpdu_len;
979
980 if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
981 return ;
982
983 /* Received MPDU */
984 mpdu_len = len - sizeof(*mac);
985
986 mac = (void *)(buf + mpdu_len);
987 if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
988 /* this frame is too damaged and can't be used - drop it */
e9348cdd 989
e9348cdd
CL
990 return ;
991 }
992
cca84799
CL
993 switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
994 case AR9170_RX_STATUS_MPDU_FIRST:
995 /* first mpdu packet has the plcp header */
996 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
997 head = (void *) buf;
998 memcpy(&ar->rx_mpdu.plcp, (void *) buf,
999 sizeof(struct ar9170_rx_head));
1000
1001 mpdu_len -= sizeof(struct ar9170_rx_head);
1002 buf += sizeof(struct ar9170_rx_head);
1003 ar->rx_mpdu.has_plcp = true;
1004 } else {
1005 if (ar9170_nag_limiter(ar))
1006 printk(KERN_ERR "%s: plcp info is clipped.\n",
1007 wiphy_name(ar->hw->wiphy));
1008 return ;
1009 }
1010 break;
1011
1012 case AR9170_RX_STATUS_MPDU_LAST:
1013 /* last mpdu has a extra tail with phy status information */
1014
1015 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
1016 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
1017 phy = (void *)(buf + mpdu_len);
1018 } else {
1019 if (ar9170_nag_limiter(ar))
1020 printk(KERN_ERR "%s: frame tail is clipped.\n",
1021 wiphy_name(ar->hw->wiphy));
1022 return ;
1023 }
1024
1025 case AR9170_RX_STATUS_MPDU_MIDDLE:
1026 /* middle mpdus are just data */
1027 if (unlikely(!ar->rx_mpdu.has_plcp)) {
1028 if (!ar9170_nag_limiter(ar))
1029 return ;
1030
1031 printk(KERN_ERR "%s: rx stream did not start "
1032 "with a first_mpdu frame tag.\n",
1033 wiphy_name(ar->hw->wiphy));
1034
1035 return ;
1036 }
1037
1038 head = &ar->rx_mpdu.plcp;
1039 break;
1040
1041 case AR9170_RX_STATUS_MPDU_SINGLE:
1042 /* single mpdu - has plcp (head) and phy status (tail) */
1043 head = (void *) buf;
1044
1045 mpdu_len -= sizeof(struct ar9170_rx_head);
1046 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
1047
1048 buf += sizeof(struct ar9170_rx_head);
1049 phy = (void *)(buf + mpdu_len);
1050 break;
1051
1052 default:
1053 BUG_ON(1);
1054 break;
e9348cdd
CL
1055 }
1056
cca84799
CL
1057 if (unlikely(mpdu_len < FCS_LEN))
1058 return ;
e9348cdd 1059
cca84799
CL
1060 memset(&status, 0, sizeof(status));
1061 if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
1062 return ;
e9348cdd 1063
cca84799
CL
1064 if (phy)
1065 ar9170_rx_phy_status(ar, phy, &status);
e9348cdd 1066
cca84799 1067 skb = ar9170_rx_copy_data(buf, mpdu_len);
f1d58c25
JB
1068 if (likely(skb)) {
1069 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
1070 ieee80211_rx_irqsafe(ar->hw, skb);
1071 }
e9348cdd
CL
1072}
1073
e9348cdd
CL
/*
 * ar9170_rx - de-multiplex one "rx stream" buffer received from the device.
 *
 * The firmware concatenates several elements into one transfer.  Each
 * element starts with a 4-byte header: bytes 0-1 are the little-endian
 * payload length, bytes 2-3 must be the 0x4e00 stream tag.  Payloads that
 * begin with six 0xffff words are command responses; everything else goes
 * to the MPDU rx path.
 *
 * A clipped element is parked in ar->rx_failover and completed from the
 * next transfer, which triggers a single nested ar9170_rx() call on the
 * reassembled buffer.
 */
1074void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
 1075{
cca84799 1076	unsigned int i, tlen, resplen, wlen = 0, clen = 0;
e9348cdd
CL
 1077	u8 *tbuf, *respbuf;
 1078
 1079	tbuf = skb->data;
 1080	tlen = skb->len;
 1081
 1082	while (tlen >= 4) {
cca84799
CL
 1083		clen = tbuf[1] << 8 | tbuf[0];
 1084		wlen = ALIGN(clen, 4);
e9348cdd 1085
cca84799 1086		/* check if this is stream has a valid tag.*/
e9348cdd 1087		if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
cca84799
CL
 1088			/*
 1089			 * TODO: handle the highly unlikely event that the
 1090			 * corrupted stream has the TAG at the right position.
 1091			 */
 1092
 1093			/* check if the frame can be repaired. */
 1094			if (!ar->rx_failover_missing) {
 1095				/* this is no "short read". */
 1096				if (ar9170_nag_limiter(ar)) {
 1097					printk(KERN_ERR "%s: missing tag!\n",
 1098					       wiphy_name(ar->hw->wiphy));
 1099					goto err_telluser;
 1100				} else
 1101					goto err_silent;
 1102			}
 1103
 1104			if (ar->rx_failover_missing > tlen) {
 1105				if (ar9170_nag_limiter(ar)) {
 1106					printk(KERN_ERR "%s: possible multi "
 1107					       "stream corruption!\n",
 1108					       wiphy_name(ar->hw->wiphy));
 1109					goto err_telluser;
 1110				} else
 1111					goto err_silent;
 1112			}
 1113
 1114			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
 1115			ar->rx_failover_missing -= tlen;
 1116
 1117			if (ar->rx_failover_missing <= 0) {
 1118				/*
 1119				 * nested ar9170_rx call!
 1120				 * termination is guranteed, even when the
 1121				 * combined frame also have a element with
 1122				 * a bad tag.
 1123				 */
 1124
 1125				ar->rx_failover_missing = 0;
 1126				ar9170_rx(ar, ar->rx_failover);
 1127
 1128				skb_reset_tail_pointer(ar->rx_failover);
 1129				skb_trim(ar->rx_failover, 0);
 1130			}
 1131
e9348cdd
CL
 1132			return ;
 1133		}
cca84799
CL
 1134
 1135		/* check if stream is clipped */
e9348cdd 1136		if (wlen > tlen - 4) {
cca84799
CL
 1137			if (ar->rx_failover_missing) {
 1138				/* TODO: handle double stream corruption. */
 1139				if (ar9170_nag_limiter(ar)) {
 1140					printk(KERN_ERR "%s: double rx stream "
 1141					       "corruption!\n",
 1142					       wiphy_name(ar->hw->wiphy));
 1143					goto err_telluser;
 1144				} else
 1145					goto err_silent;
 1146			}
 1147
 1148			/*
 1149			 * save incomplete data set.
 1150			 * the firmware will resend the missing bits when
 1151			 * the rx - descriptor comes round again.
 1152			 */
 1153
 1154			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
 1155			ar->rx_failover_missing = clen - tlen;
e9348cdd
CL
 1156			return ;
 1157		}
 1158		resplen = clen;
 1159		respbuf = tbuf + 4;
 1160		tbuf += wlen + 4;
 1161		tlen -= wlen + 4;
 1162
 1163		i = 0;
 1164
 1165		/* weird thing, but this is the same in the original driver */
 1166		while (resplen > 2 && i < 12 &&
 1167		       respbuf[0] == 0xff && respbuf[1] == 0xff) {
 1168			i += 2;
 1169			resplen -= 2;
 1170			respbuf += 2;
 1171		}
 1172
 1173		if (resplen < 4)
 1174			continue;
 1175
 1176		/* found the 6 * 0xffff marker? */
 1177		if (i == 12)
 1178			ar9170_handle_command_response(ar, respbuf, resplen);
 1179		else
cca84799 1180			ar9170_handle_mpdu(ar, respbuf, clen);
e9348cdd
CL
 1181	}
 1182
cca84799
CL
 1183	if (tlen) {
 1184		if (net_ratelimit())
 1185			printk(KERN_ERR "%s: %d bytes of unprocessed "
 1186					"data left in rx stream!\n",
 1187				wiphy_name(ar->hw->wiphy), tlen);
 1188
 1189		goto err_telluser;
 1190	}
 1191
 1192	return ;
 1193
 1194err_telluser:
 1195	printk(KERN_ERR "%s: damaged RX stream data [want:%d, "
 1196			"data:%d, rx:%d, pending:%d ]\n",
 1197		wiphy_name(ar->hw->wiphy), clen, wlen, tlen,
 1198		ar->rx_failover_missing);
 1199
 1200	if (ar->rx_failover_missing)
 1201		print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
 1202				     ar->rx_failover->data,
 1203				     ar->rx_failover->len);
 1204
 1205	print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
 1206			     skb->data, skb->len);
 1207
 1208	printk(KERN_ERR "%s: please check your hardware and cables, if "
 1209			"you see this message frequently.\n",
 1210		wiphy_name(ar->hw->wiphy));
 1211
 1212err_silent:
	/* drop any half-assembled failover state so the next stream starts clean */
 1213	if (ar->rx_failover_missing) {
 1214		skb_reset_tail_pointer(ar->rx_failover);
 1215		skb_trim(ar->rx_failover, 0);
 1216		ar->rx_failover_missing = 0;
 1217	}
e9348cdd 1218}
e9348cdd
CL
1219
/*
 * Initialise one EDCA parameter record (AIFS, CWmin, CWmax, TXOP)
 * in-place.  Multi-statement macro, hence the do { } while (0) wrapper.
 */
 1220#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
 1221do {									\
 1222	queue.aifs = ai_fs;						\
 1223	queue.cw_min = cwmin;						\
 1224	queue.cw_max = cwmax;						\
 1225	queue.txop = _txop;						\
 1226} while (0)
1227
/*
 * ar9170_op_start - mac80211 start() callback: bring the device up.
 *
 * Resets driver-side queue statistics, QoS/EDCA and A-MPDU defaults,
 * then runs the bring-up sequence: transport open, MAC init, QoS
 * registers, PHY init (2 GHz), RF init, and finally starts DMA by
 * writing register 0x1c3d30.  The order of these steps matters.
 * Returns 0 on success or a negative error code; runs under ar->mutex.
 */
 1228static int ar9170_op_start(struct ieee80211_hw *hw)
 1229{
 1230	struct ar9170 *ar = hw->priv;
 1231	int err, i;
 1232
 1233	mutex_lock(&ar->mutex);
 1234
864cc02e
CL
 1235	ar->filter_changed = 0;
 1236
e9348cdd
CL
 1237	/* reinitialize queues statistics */
 1238	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
9b9c5aae
CL
 1239	for (i = 0; i < __AR9170_NUM_TXQ; i++)
 1240		ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
e9348cdd
CL
 1241
 1242	/* reset QoS defaults */
 1243	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023,  0); /* BEST EFFORT*/
 1244	AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023,  0); /* BACKGROUND */
 1245	AR9170_FILL_QUEUE(ar->edcf[2], 2, 7,    15, 94); /* VIDEO */
 1246	AR9170_FILL_QUEUE(ar->edcf[3], 2, 3,     7, 47); /* VOICE */
 1247	AR9170_FILL_QUEUE(ar->edcf[4], 2, 3,     7,  0); /* SPECIAL */
 1248
acbadf01
CL
 1249	/* set sane AMPDU defaults */
 1250	ar->global_ampdu_density = 6;
 1251	ar->global_ampdu_factor = 3;
 1252
cca84799
CL
 1253	ar->bad_hw_nagger = jiffies;
 1254
e9348cdd
CL
 1255	err = ar->open(ar);
 1256	if (err)
 1257		goto out;
 1258
 1259	err = ar9170_init_mac(ar);
 1260	if (err)
 1261		goto out;
 1262
 1263	err = ar9170_set_qos(ar);
 1264	if (err)
 1265		goto out;
 1266
 1267	err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
 1268	if (err)
 1269		goto out;
 1270
 1271	err = ar9170_init_rf(ar);
 1272	if (err)
 1273		goto out;
 1274
 1275	/* start DMA */
 1276	err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
 1277	if (err)
 1278		goto out;
 1279
 1280	ar->state = AR9170_STARTED;
 1281
 1282out:
 1283	mutex_unlock(&ar->mutex);
 1284	return err;
 1285}
1286
/*
 * ar9170_op_stop - mac80211 stop() callback: tear the device down.
 *
 * Flips the state to IDLE first so new work bails out, then cancels all
 * pending work items *before* taking ar->mutex (the work handlers take
 * the mutex themselves; cancelling inside it would deadlock).  Finally
 * stops DMA, closes the transport and purges every tx queue.
 */
 1287static void ar9170_op_stop(struct ieee80211_hw *hw)
 1288{
 1289	struct ar9170 *ar = hw->priv;
9b9c5aae 1290	unsigned int i;
e9348cdd
CL
 1291
 1292	if (IS_STARTED(ar))
 1293		ar->state = AR9170_IDLE;
 1294
9b9c5aae 1295	cancel_delayed_work_sync(&ar->tx_janitor);
ff8365ca 1296#ifdef CONFIG_AR9170_LEDS
acbadf01 1297	cancel_delayed_work_sync(&ar->led_work);
ff8365ca 1298#endif
e9348cdd
CL
 1299	cancel_work_sync(&ar->filter_config_work);
 1300	cancel_work_sync(&ar->beacon_work);
e351cfbf 1301
b55d6bcf 1302	mutex_lock(&ar->mutex);
e9348cdd
CL
 1303
 1304	if (IS_ACCEPTING_CMD(ar)) {
 1305		ar9170_set_leds_state(ar, 0);
 1306
 1307		/* stop DMA */
 1308		ar9170_write_reg(ar, 0x1c3d30, 0);
 1309		ar->stop(ar);
 1310	}
 1311
9b9c5aae
CL
 1312	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
 1313		skb_queue_purge(&ar->tx_pending[i]);
 1314		skb_queue_purge(&ar->tx_status[i]);
 1315	}
acbadf01
CL
 1316	skb_queue_purge(&ar->tx_status_ampdu);
 1317
e9348cdd
CL
 1318	mutex_unlock(&ar->mutex);
 1319}
1320
acbadf01
CL
1321static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
1322{
1323 struct ar9170_tx_control *txc = (void *) skb->data;
1324
1325 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
1326}
1327
1328static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
1329 struct sk_buff *src)
1330{
1331 struct ar9170_tx_control *dst_txc, *src_txc;
1332 struct ieee80211_tx_info *dst_info, *src_info;
1333 struct ar9170_tx_info *dst_arinfo, *src_arinfo;
1334
1335 src_txc = (void *) src->data;
1336 src_info = IEEE80211_SKB_CB(src);
1337 src_arinfo = (void *) src_info->rate_driver_data;
1338
1339 dst_txc = (void *) dst->data;
1340 dst_info = IEEE80211_SKB_CB(dst);
1341 dst_arinfo = (void *) dst_info->rate_driver_data;
1342
1343 dst_txc->phy_control = src_txc->phy_control;
1344
1345 /* same MCS for the whole aggregate */
1346 memcpy(dst_info->driver_rates, src_info->driver_rates,
1347 sizeof(dst_info->driver_rates));
1348}
1349
/*
 * ar9170_tx_prepare - prepend and fill the hardware tx descriptor.
 *
 * Pushes a struct ar9170_tx_control in front of the frame and encodes
 * length (+ICV +FCS), QoS queue, encryption type, protection (RTS/CTS)
 * and ack policy.  Also stamps the per-frame ar9170_tx_info (timeout and
 * status-tracking flags) that lives in info->rate_driver_data.
 * Returns 0 on success; on error the descriptor is popped off again and
 * -EINVAL is returned, leaving the skb as it was passed in.
 */
9b9c5aae 1350static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
e9348cdd 1351{
e9348cdd
CL
 1352	struct ieee80211_hdr *hdr;
 1353	struct ar9170_tx_control *txc;
 1354	struct ieee80211_tx_info *info;
e9348cdd 1355	struct ieee80211_tx_rate *txrate;
9b9c5aae 1356	struct ar9170_tx_info *arinfo;
e9348cdd 1357	unsigned int queue = skb_get_queue_mapping(skb);
e9348cdd
CL
 1358	u16 keytype = 0;
 1359	u16 len, icv = 0;
e9348cdd 1360
9b9c5aae 1361	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
e9348cdd
CL
 1362
 1363	hdr = (void *)skb->data;
 1364	info = IEEE80211_SKB_CB(skb);
 1365	len = skb->len;
 1366
e9348cdd
CL
 1367	txc = (void *)skb_push(skb, sizeof(*txc));
 1368
e9348cdd
CL
 1369	if (info->control.hw_key) {
 1370		icv = info->control.hw_key->icv_len;
 1371
 1372		switch (info->control.hw_key->alg) {
 1373		case ALG_WEP:
 1374			keytype = AR9170_TX_MAC_ENCR_RC4;
 1375			break;
 1376		case ALG_TKIP:
 1377			keytype = AR9170_TX_MAC_ENCR_RC4;
 1378			break;
 1379		case ALG_CCMP:
 1380			keytype = AR9170_TX_MAC_ENCR_AES;
 1381			break;
 1382		default:
 1383			WARN_ON(1);
9b9c5aae 1384			goto err_out;
e9348cdd
CL
 1385		}
 1386	}
 1387
 1388	/* Length */
	/* +4 accounts for the FCS appended by the hardware */
 1389	txc->length = cpu_to_le16(len + icv + 4);
 1390
 1391	txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
 1392				       AR9170_TX_MAC_BACKOFF);
 1393	txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
 1394					AR9170_TX_MAC_QOS_SHIFT);
 1395	txc->mac_control |= cpu_to_le16(keytype);
 1396	txc->phy_control = cpu_to_le32(0);
 1397
 1398	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 1399		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
 1400
e9348cdd 1401	txrate = &info->control.rates[0];
e9348cdd
CL
 1402	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 1403		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
 1404	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
 1405		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
 1406
9b9c5aae
CL
 1407	arinfo = (void *)info->rate_driver_data;
 1408	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
 1409
 1410	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
 1411	     (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
 1412		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 1413			if (unlikely(!info->control.sta))
 1414				goto err_out;
 1415
 1416			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
 1417			arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
acbadf01 1418
9b9c5aae
CL
 1419			goto out;
 1420		}
 1421
 1422		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
 1423		/*
 1424		 * WARNING:
 1425		 * Putting the QoS queue bits into an unexplored territory is
 1426		 * certainly not elegant.
 1427		 *
 1428		 * In my defense: This idea provides a reasonable way to
 1429		 * smuggle valuable information to the tx_status callback.
 1430		 * Also, the idea behind this bit-abuse came straight from
 1431		 * the original driver code.
 1432		 */
 1433
 1434		txc->phy_control |=
 1435			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
 1436		arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
 1437	} else {
 1438		arinfo->flags = AR9170_TX_FLAG_NO_ACK;
 1439	}
 1440
 1441out:
 1442	return 0;
 1443
 1444err_out:
	/* undo the skb_push() so the caller gets its frame back untouched */
 1445	skb_pull(skb, sizeof(*txc));
 1446	return -EINVAL;
 1447}
1448
/*
 * ar9170_tx_prepare_phy - encode rate, modulation, TX power and chain
 * mask into the descriptor's phy_control word.
 *
 * Uses the first entry of info->control.rates.  HT (MCS) rates pull
 * their power from the ht20/ht40 EEPROM tables; legacy rates are mapped
 * through __ar9170_ratetable to CCK/OFDM power tables.  Must run after
 * ar9170_tx_prepare() has initialised txc->phy_control.
 */
 1449static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
 1450{
 1451	struct ar9170_tx_control *txc;
 1452	struct ieee80211_tx_info *info;
 1453	struct ieee80211_rate *rate = NULL;
 1454	struct ieee80211_tx_rate *txrate;
 1455	u32 power, chains;
 1456
 1457	txc = (void *) skb->data;
 1458	info = IEEE80211_SKB_CB(skb);
 1459	txrate = &info->control.rates[0];
 1460
e9348cdd
CL
 1461	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 1462		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
 1463
 1464	if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 1465		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
 1466
 1467	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 1468		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
 1469	/* this works because 40 MHz is 2 and dup is 3 */
 1470	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
 1471		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
 1472
 1473	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
 1474		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
 1475
 1476	if (txrate->flags & IEEE80211_TX_RC_MCS) {
 1477		u32 r = txrate->idx;
 1478		u8 *txpower;
 1479
9b9c5aae
CL
 1480		/* heavy clip control */
 1481		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
 1482
e9348cdd 1483		r <<= AR9170_TX_PHY_MCS_SHIFT;
9b9c5aae
CL
 1484		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
 1485
e9348cdd
CL
 1486		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
 1487		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
 1488
 1489		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
 1490			if (info->band == IEEE80211_BAND_5GHZ)
 1491				txpower = ar->power_5G_ht40;
 1492			else
 1493				txpower = ar->power_2G_ht40;
 1494		} else {
 1495			if (info->band == IEEE80211_BAND_5GHZ)
 1496				txpower = ar->power_5G_ht20;
 1497			else
 1498				txpower = ar->power_2G_ht20;
 1499		}
 1500
 1501		power = txpower[(txrate->idx) & 7];
 1502	} else {
 1503		u8 *txpower;
 1504		u32 mod;
 1505		u32 phyrate;
 1506		u8 idx = txrate->idx;
 1507
 1508		if (info->band != IEEE80211_BAND_2GHZ) {
			/* 5 GHz has no CCK rates: skip the 4 CCK table slots */
 1509			idx += 4;
 1510			txpower = ar->power_5G_leg;
 1511			mod = AR9170_TX_PHY_MOD_OFDM;
 1512		} else {
 1513			if (idx < 4) {
 1514				txpower = ar->power_2G_cck;
 1515				mod = AR9170_TX_PHY_MOD_CCK;
 1516			} else {
 1517				mod = AR9170_TX_PHY_MOD_OFDM;
 1518				txpower = ar->power_2G_ofdm;
 1519			}
 1520		}
 1521
 1522		rate = &__ar9170_ratetable[idx];
 1523
 1524		phyrate = rate->hw_value & 0xF;
 1525		power = txpower[(rate->hw_value & 0x30) >> 4];
 1526		phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
 1527
 1528		txc->phy_control |= cpu_to_le32(mod);
 1529		txc->phy_control |= cpu_to_le32(phyrate);
 1530	}
 1531
 1532	power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
 1533	power &= AR9170_TX_PHY_TX_PWR_MASK;
 1534	txc->phy_control |= cpu_to_le32(power);
 1535
 1536	/* set TX chains */
 1537	if (ar->eeprom.tx_mask == 1) {
 1538		chains = AR9170_TX_PHY_TXCHAIN_1;
 1539	} else {
 1540		chains = AR9170_TX_PHY_TXCHAIN_2;
 1541
 1542		/* >= 36M legacy OFDM - use only one chain */
 1543		if (rate && rate->bitrate >= 360)
 1544			chains = AR9170_TX_PHY_TXCHAIN_1;
 1545	}
 1546	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
9b9c5aae 1547}
e9348cdd 1548
acbadf01
CL
/*
 * ar9170_tx_ampdu - build A-MPDUs from the per-TID reorder queues.
 *
 * Walks ar->tx_ampdu_list (under tx_ampdu_list_lock), pulls runs of
 * consecutive-sequence frames out of each TID queue (under that queue's
 * own lock), stamps them with the leading frame's PHY parameters, marks
 * the tail subframe for an immediate block ack and splices the batch
 * onto the matching tx_pending queue.  Returns true if at least one
 * aggregate was queued (i.e. the caller should kick ar9170_tx()).
 */
 1549static bool ar9170_tx_ampdu(struct ar9170 *ar)
 1550{
 1551	struct sk_buff_head agg;
 1552	struct ar9170_sta_tid *tid_info = NULL, *tmp;
 1553	struct sk_buff *skb, *first = NULL;
 1554	unsigned long flags, f2;
 1555	unsigned int i = 0;
 1556	u16 seq, queue, tmpssn;
 1557	bool run = false;
 1558
 1559	skb_queue_head_init(&agg);
 1560
 1561	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
 1562	if (list_empty(&ar->tx_ampdu_list)) {
 1563#ifdef AR9170_TXAGG_DEBUG
 1564		printk(KERN_DEBUG "%s: aggregation list is empty.\n",
 1565		       wiphy_name(ar->hw->wiphy));
 1566#endif /* AR9170_TXAGG_DEBUG */
 1567		goto out_unlock;
 1568	}
 1569
 1570	list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
 1571		if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
 1572#ifdef AR9170_TXAGG_DEBUG
 1573			printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
 1574			       wiphy_name(ar->hw->wiphy));
 1575#endif /* AR9170_TXAGG_DEBUG */
 1576			continue;
 1577		}
 1578
 1579		if (++i > 64) {
 1580#ifdef AR9170_TXAGG_DEBUG
 1581			printk(KERN_DEBUG "%s: enough frames aggregated.\n",
 1582			       wiphy_name(ar->hw->wiphy));
 1583#endif /* AR9170_TXAGG_DEBUG */
 1584			break;
 1585		}
 1586
 1587		queue = TID_TO_WME_AC(tid_info->tid);
 1588
 1589		if (skb_queue_len(&ar->tx_pending[queue]) >=
 1590		    AR9170_NUM_TX_AGG_MAX) {
 1591#ifdef AR9170_TXAGG_DEBUG
 1592			printk(KERN_DEBUG "%s: queue %d full.\n",
 1593			       wiphy_name(ar->hw->wiphy), queue);
 1594#endif /* AR9170_TXAGG_DEBUG */
 1595			continue;
 1596		}
 1597
 1598		list_del_init(&tid_info->list);
 1599
 1600		spin_lock_irqsave(&tid_info->queue.lock, f2);
 1601		tmpssn = seq = tid_info->ssn;
 1602		first = skb_peek(&tid_info->queue);
 1603
 1604		if (likely(first))
 1605			tmpssn = ar9170_get_seq(first);
 1606
 1607		if (unlikely(tmpssn != seq)) {
 1608#ifdef AR9170_TXAGG_DEBUG
 1609			printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
 1610			       wiphy_name(ar->hw->wiphy), seq, tmpssn);
 1611#endif /* AR9170_TXAGG_DEBUG */
 1612			tid_info->ssn = tmpssn;
 1613		}
 1614
 1615#ifdef AR9170_TXAGG_DEBUG
 1616		printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
 1617		       "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
 1618		       tid_info->tid, tid_info->ssn,
 1619		       skb_queue_len(&tid_info->queue));
 1620		__ar9170_dump_txqueue(ar, &tid_info->queue);
 1621#endif /* AR9170_TXAGG_DEBUG */
 1622
		/* pull frames while their sequence numbers stay consecutive */
 1623		while ((skb = skb_peek(&tid_info->queue))) {
 1624			if (unlikely(ar9170_get_seq(skb) != seq))
 1625				break;
 1626
 1627			__skb_unlink(skb, &tid_info->queue);
 1628			tid_info->ssn = seq = GET_NEXT_SEQ(seq);
 1629
 1630			if (unlikely(skb_get_queue_mapping(skb) != queue)) {
 1631#ifdef AR9170_TXAGG_DEBUG
 1632				printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
 1633				       "!match.\n", wiphy_name(ar->hw->wiphy),
 1634				       tid_info->tid,
 1635				       TID_TO_WME_AC(tid_info->tid),
 1636				       skb_get_queue_mapping(skb));
 1637#endif /* AR9170_TXAGG_DEBUG */
 1638					dev_kfree_skb_any(skb);
 1639					continue;
 1640			}
 1641
 1642			if (unlikely(first == skb)) {
 1643				ar9170_tx_prepare_phy(ar, skb);
 1644				__skb_queue_tail(&agg, skb);
 1645				first = skb;
 1646			} else {
 1647				ar9170_tx_copy_phy(ar, skb, first);
 1648				__skb_queue_tail(&agg, skb);
 1649			}
 1650
 1651			if (unlikely(skb_queue_len(&agg) ==
 1652			    AR9170_NUM_TX_AGG_MAX))
 1653				break;
 1654		}
 1655
 1656		if (skb_queue_empty(&tid_info->queue))
 1657			tid_info->active = false;
 1658		else
 1659			list_add_tail(&tid_info->list,
 1660				      &ar->tx_ampdu_list);
 1661
 1662		spin_unlock_irqrestore(&tid_info->queue.lock, f2);
 1663
 1664		if (unlikely(skb_queue_empty(&agg))) {
 1665#ifdef AR9170_TXAGG_DEBUG
 1666			printk(KERN_DEBUG "%s: queued empty list!\n",
 1667			       wiphy_name(ar->hw->wiphy));
 1668#endif /* AR9170_TXAGG_DEBUG */
 1669			continue;
 1670		}
 1671
 1672		/*
 1673		 * tell the FW/HW that this is the last frame,
 1674		 * that way it will wait for the immediate block ack.
 1675		 */
 1676		if (likely(skb_peek_tail(&agg)))
 1677			ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
 1678
 1679#ifdef AR9170_TXAGG_DEBUG
 1680		printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
 1681		       wiphy_name(ar->hw->wiphy));
 1682		__ar9170_dump_txqueue(ar, &agg);
 1683#endif /* AR9170_TXAGG_DEBUG */
 1684
		/* drop the list lock while splicing onto the pending queue */
 1685		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
 1686
 1687		spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
 1688		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
 1689		spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
 1690		run = true;
 1691
 1692		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
 1693	}
 1694
 1695out_unlock:
 1696	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
 1697	__skb_queue_purge(&agg);
 1698
 1699	return run;
 1700}
1701
9b9c5aae
CL
/*
 * ar9170_tx - push pending frames from the software queues to the device.
 *
 * For each hardware queue: if the in-flight count hit its limit the
 * mac80211 queue is stopped; otherwise up to AR9170_TX_MAX_PENDING
 * frames total (across all queues) are dequeued and handed to the
 * transport via ar->tx().  Failed submissions roll the per-queue
 * accounting back and wake the queue.  If anything was sent, the tx
 * janitor is (re)scheduled to reap status/timeouts.
 */
 1702static void ar9170_tx(struct ar9170 *ar)
 1703{
 1704	struct sk_buff *skb;
 1705	unsigned long flags;
 1706	struct ieee80211_tx_info *info;
 1707	struct ar9170_tx_info *arinfo;
 1708	unsigned int i, frames, frames_failed, remaining_space;
 1709	int err;
 1710	bool schedule_garbagecollector = false;
e9348cdd 1711
9b9c5aae 1712	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
e9348cdd 1713
9b9c5aae
CL
 1714	if (unlikely(!IS_STARTED(ar)))
 1715		return ;
 1716
 1717	remaining_space = AR9170_TX_MAX_PENDING;
 1718
 1719	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
 1720		spin_lock_irqsave(&ar->tx_stats_lock, flags);
 1721		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
 1722#ifdef AR9170_QUEUE_DEBUG
 1723			printk(KERN_DEBUG "%s: queue %d full\n",
 1724			       wiphy_name(ar->hw->wiphy), i);
 1725
acbadf01
CL
 1726			printk(KERN_DEBUG "%s: stuck frames: ===> \n",
 1727			       wiphy_name(ar->hw->wiphy));
9b9c5aae
CL
 1728			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
 1729			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
 1730#endif /* AR9170_QUEUE_DEBUG */
acbadf01
CL
 1731
 1732#ifdef AR9170_QUEUE_STOP_DEBUG
 1733			printk(KERN_DEBUG "%s: stop queue %d\n",
 1734			       wiphy_name(ar->hw->wiphy), i);
 1735			__ar9170_dump_txstats(ar);
 1736#endif /* AR9170_QUEUE_STOP_DEBUG */
9b9c5aae
CL
 1737			ieee80211_stop_queue(ar->hw, i);
 1738			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 1739			continue;
 1740		}
e9348cdd 1741
9b9c5aae
CL
 1742		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
 1743			     skb_queue_len(&ar->tx_pending[i]));
 1744
 1745		if (remaining_space < frames) {
 1746#ifdef AR9170_QUEUE_DEBUG
 1747			printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
 1748			       "remaining slots:%d, needed:%d\n",
 1749			       wiphy_name(ar->hw->wiphy), i, remaining_space,
 1750			       frames);
9b9c5aae
CL
 1751#endif /* AR9170_QUEUE_DEBUG */
 1752			frames = remaining_space;
 1753		}
 1754
		/* charge the quota up-front; rolled back below on failure */
 1755		ar->tx_stats[i].len += frames;
 1756		ar->tx_stats[i].count += frames;
 1757		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 1758
 1759		if (!frames)
 1760			continue;
 1761
 1762		frames_failed = 0;
 1763		while (frames) {
 1764			skb = skb_dequeue(&ar->tx_pending[i]);
 1765			if (unlikely(!skb)) {
 1766				frames_failed += frames;
 1767				frames = 0;
 1768				break;
 1769			}
 1770
 1771			info = IEEE80211_SKB_CB(skb);
 1772			arinfo = (void *) info->rate_driver_data;
 1773
 1774			/* TODO: cancel stuck frames */
 1775			arinfo->timeout = jiffies +
 1776					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
 1777
acbadf01
CL
 1778			if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
 1779				ar->tx_ampdu_pending++;
 1780
9b9c5aae
CL
 1781#ifdef AR9170_QUEUE_DEBUG
 1782			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
 1783			       wiphy_name(ar->hw->wiphy), i);
 1784			ar9170_print_txheader(ar, skb);
 1785#endif /* AR9170_QUEUE_DEBUG */
 1786
 1787			err = ar->tx(ar, skb);
 1788			if (unlikely(err)) {
acbadf01
CL
 1789				if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
 1790					ar->tx_ampdu_pending--;
 1791
9b9c5aae
CL
 1792				frames_failed++;
 1793				dev_kfree_skb_any(skb);
 1794			} else {
 1795				remaining_space--;
 1796				schedule_garbagecollector = true;
 1797			}
 1798
 1799			frames--;
 1800		}
 1801
 1802#ifdef AR9170_QUEUE_DEBUG
 1803		printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
 1804		       wiphy_name(ar->hw->wiphy), i);
 1805
 1806		printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
 1807		       wiphy_name(ar->hw->wiphy));
 1808		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
 1809#endif /* AR9170_QUEUE_DEBUG */
 1810
 1811		if (unlikely(frames_failed)) {
 1812#ifdef AR9170_QUEUE_DEBUG
acbadf01 1813			printk(KERN_DEBUG "%s: frames failed %d =>\n",
9b9c5aae
CL
 1814			       wiphy_name(ar->hw->wiphy), frames_failed);
 1815#endif /* AR9170_QUEUE_DEBUG */
 1816
 1817			spin_lock_irqsave(&ar->tx_stats_lock, flags);
 1818			ar->tx_stats[i].len -= frames_failed;
 1819			ar->tx_stats[i].count -= frames_failed;
acbadf01
CL
 1820#ifdef AR9170_QUEUE_STOP_DEBUG
 1821			printk(KERN_DEBUG "%s: wake queue %d\n",
 1822			       wiphy_name(ar->hw->wiphy), i);
 1823			__ar9170_dump_txstats(ar);
 1824#endif /* AR9170_QUEUE_STOP_DEBUG */
9b9c5aae
CL
 1825			ieee80211_wake_queue(ar->hw, i);
 1826			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
e9348cdd
CL
 1827		}
 1828	}
 1829
42935eca
LR
 1830	if (!schedule_garbagecollector)
 1831		return;
 1832
 1833	ieee80211_queue_delayed_work(ar->hw,
 1834				     &ar->tx_janitor,
 1835				     msecs_to_jiffies(AR9170_JANITOR_DELAY));
9b9c5aae
CL
 1836}
1837
acbadf01
CL
/*
 * ar9170_tx_ampdu_queue - insert an A-MPDU candidate frame into its
 * per-TID reorder queue, keeping the queue sorted by sequence number.
 *
 * Frames outside the current block-ack window, or frames for a session
 * that is not fully established, are dropped.  Returns true when the
 * queue grew large enough that the caller should run ar9170_tx_ampdu()
 * immediately.  Takes tx_ampdu_list_lock and the TID queue lock.
 */
 1838static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
 1839{
 1840	struct ieee80211_tx_info *txinfo;
 1841	struct ar9170_sta_info *sta_info;
 1842	struct ar9170_sta_tid *agg;
 1843	struct sk_buff *iter;
 1844	unsigned long flags, f2;
 1845	unsigned int max;
 1846	u16 tid, seq, qseq;
 1847	bool run = false, queue = false;
 1848
 1849	tid = ar9170_get_tid(skb);
 1850	seq = ar9170_get_seq(skb);
 1851	txinfo = IEEE80211_SKB_CB(skb);
 1852	sta_info = (void *) txinfo->control.sta->drv_priv;
 1853	agg = &sta_info->agg[tid];
 1854	max = sta_info->ampdu_max_len;
 1855
 1856	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
 1857
 1858	if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
 1859#ifdef AR9170_TXAGG_DEBUG
 1860		printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
 1861		       "for ESS:%pM tid:%d state:%d.\n",
 1862		       wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
 1863		       agg->state);
 1864#endif /* AR9170_TXAGG_DEBUG */
 1865		goto err_unlock;
 1866	}
 1867
 1868	if (!agg->active) {
 1869		agg->active = true;
 1870		agg->ssn = seq;
 1871		queue = true;
 1872	}
 1873
 1874	/* check if seq is within the BA window */
 1875	if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
 1876#ifdef AR9170_TXAGG_DEBUG
 1877		printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
 1878		       "fit into BA window (%d - %d)\n",
 1879		       wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
 1880		       (agg->ssn + max) & 0xfff);
 1881#endif /* AR9170_TXAGG_DEBUG */
 1882		goto err_unlock;
 1883	}
 1884
 1885	spin_lock_irqsave(&agg->queue.lock, f2);
 1886
	/* walk backwards: the insertion point is usually near the tail */
 1887	skb_queue_reverse_walk(&agg->queue, iter) {
 1888		qseq = ar9170_get_seq(iter);
 1889
 1890		if (GET_NEXT_SEQ(qseq) == seq) {
 1891			__skb_queue_after(&agg->queue, iter, skb);
 1892			goto queued;
 1893		}
 1894	}
 1895
 1896	__skb_queue_head(&agg->queue, skb);
 1897
 1898queued:
 1899	spin_unlock_irqrestore(&agg->queue.lock, f2);
 1900
 1901#ifdef AR9170_TXAGG_DEBUG
 1902	printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
 1903	       wiphy_name(ar->hw->wiphy), skb);
 1904	__ar9170_dump_txqueue(ar, &agg->queue);
 1905#endif /* AR9170_TXAGG_DEBUG */
 1906
 1907	if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
 1908		run = true;
 1909
 1910	if (queue)
 1911		list_add_tail(&agg->list, &ar->tx_ampdu_list);
 1912
 1913	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
 1914	return run;
 1915
 1916err_unlock:
 1917	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
 1918	dev_kfree_skb_irq(skb);
 1919	return false;
 1920}
1921
9b9c5aae
CL
/*
 * ar9170_op_tx - mac80211 tx() entry point.
 *
 * Prepares the hardware descriptor, then routes the frame either into
 * the A-MPDU aggregation machinery or straight onto the per-AC pending
 * queue, and kicks ar9170_tx().  Frames are dropped (freed) when the
 * device is not started or descriptor preparation fails; always returns
 * NETDEV_TX_OK.
 */
 1922int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 1923{
 1924	struct ar9170 *ar = hw->priv;
 1925	struct ieee80211_tx_info *info;
 1926
 1927	if (unlikely(!IS_STARTED(ar)))
 1928		goto err_free;
 1929
 1930	if (unlikely(ar9170_tx_prepare(ar, skb)))
 1931		goto err_free;
 1932
 1933	info = IEEE80211_SKB_CB(skb);
 1934	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
acbadf01
CL
 1935		bool run = ar9170_tx_ampdu_queue(ar, skb);
 1936
 1937		if (run || !ar->tx_ampdu_pending)
 1938			ar9170_tx_ampdu(ar);
9b9c5aae
CL
 1939	} else {
 1940		unsigned int queue = skb_get_queue_mapping(skb);
 1941
 1942		ar9170_tx_prepare_phy(ar, skb);
 1943		skb_queue_tail(&ar->tx_pending[queue], skb);
e9348cdd
CL
 1944	}
 1945
9b9c5aae 1946	ar9170_tx(ar);
e9348cdd
CL
 1947	return NETDEV_TX_OK;
 1948
e9348cdd 1949err_free:
9b9c5aae 1950	dev_kfree_skb_any(skb);
e9348cdd
CL
 1951	return NETDEV_TX_OK;
 1952}
1953
/*
 * ar9170_op_add_interface - mac80211 add_interface() callback.
 *
 * Only a single virtual interface is supported (-EBUSY otherwise).
 * Software crypto is forced for non-station modes or when the
 * nohwcrypt module parameter is set.  Installs the default frame
 * filter and programs the operating mode; runs under ar->mutex.
 */
 1954static int ar9170_op_add_interface(struct ieee80211_hw *hw,
 1955				   struct ieee80211_if_init_conf *conf)
 1956{
 1957	struct ar9170 *ar = hw->priv;
 1958	int err = 0;
 1959
 1960	mutex_lock(&ar->mutex);
 1961
 1962	if (ar->vif) {
 1963		err = -EBUSY;
 1964		goto unlock;
 1965	}
 1966
 1967	ar->vif = conf->vif;
 1968	memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);
 1969
 1970	if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
 1971		ar->rx_software_decryption = true;
 1972		ar->disable_offload = true;
 1973	}
 1974
 1975	ar->cur_filter = 0;
 1976	ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS;
 1977	err = ar9170_update_frame_filter(ar);
 1978	if (err)
 1979		goto unlock;
 1980
 1981	err = ar9170_set_operating_mode(ar);
 1982
 1983unlock:
 1984	mutex_unlock(&ar->mutex);
 1985	return err;
 1986}
1987
/*
 * ar9170_op_remove_interface - mac80211 remove_interface() callback.
 *
 * Clears the vif, frame filter and beacon state, releases the cached
 * beacon skb and restores the default (non-sniffer, hw-crypto) mode.
 * Runs under ar->mutex; errors from the device calls are ignored on
 * this teardown path.
 */
 1988static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
 1989				       struct ieee80211_if_init_conf *conf)
 1990{
 1991	struct ar9170 *ar = hw->priv;
 1992
 1993	mutex_lock(&ar->mutex);
 1994	ar->vif = NULL;
 1995	ar->want_filter = 0;
 1996	ar9170_update_frame_filter(ar);
 1997	ar9170_set_beacon_timers(ar);
 1998	dev_kfree_skb(ar->beacon);
 1999	ar->beacon = NULL;
 2000	ar->sniffer_enabled = false;
 2001	ar->rx_software_decryption = false;
 2002	ar9170_set_operating_mode(ar);
 2003	mutex_unlock(&ar->mutex);
 2004}
2005
/*
 * ar9170_op_config - mac80211 config() callback.
 *
 * Applies hardware-configuration changes indicated by the
 * IEEE80211_CONF_CHANGE_* bits in @changed: retry limits, beacon
 * timers, and channel switches (which also re-tune slot time and
 * dynamic SIFS-ack for 5 GHz).  Runs under ar->mutex.
 */
 2006static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
 2007{
 2008	struct ar9170 *ar = hw->priv;
 2009	int err = 0;
 2010
 2011	mutex_lock(&ar->mutex);
 2012
e9348cdd
CL
 2013	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
 2014		/* TODO */
 2015		err = 0;
 2016	}
 2017
 2018	if (changed & IEEE80211_CONF_CHANGE_PS) {
 2019		/* TODO */
 2020		err = 0;
 2021	}
 2022
 2023	if (changed & IEEE80211_CONF_CHANGE_POWER) {
 2024		/* TODO */
 2025		err = 0;
 2026	}
 2027
 2028	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
 2029		/*
 2030		 * is it long_frame_max_tx_count or short_frame_max_tx_count?
 2031		 */
 2032
 2033		err = ar9170_set_hwretry_limit(ar,
 2034			ar->hw->conf.long_frame_max_tx_count);
 2035		if (err)
 2036			goto out;
 2037	}
 2038
	/*
	 * NOTE(review): BSS_CHANGED_BEACON_INT is a bss_info_changed flag,
	 * but 'changed' here carries IEEE80211_CONF_CHANGE_* bits - this
	 * looks like a mismatched flag test; confirm against mac80211.
	 */
57c4d7b4 2039	if (changed & BSS_CHANGED_BEACON_INT) {
e9348cdd
CL
 2040		err = ar9170_set_beacon_timers(ar);
 2041		if (err)
 2042			goto out;
 2043	}
 2044
 2045	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
29ceff5d
CL
 2046
 2047		/* adjust slot time for 5 GHz */
 2048		err = ar9170_set_slot_time(ar);
 2049		if (err)
 2050			goto out;
 2051
 2052		err = ar9170_set_dyn_sifs_ack(ar);
 2053		if (err)
 2054			goto out;
 2055
e9348cdd 2056		err = ar9170_set_channel(ar, hw->conf.channel,
9e52b062
JB
 2057				AR9170_RFI_NONE,
 2058				nl80211_to_ar9170(hw->conf.channel_type));
e9348cdd
CL
 2059		if (err)
 2060			goto out;
e9348cdd
CL
 2061	}
 2062
 2063out:
 2064	mutex_unlock(&ar->mutex);
 2065	return err;
 2066}
2067
e9348cdd
CL
/*
 * ar9170_set_filters - deferred filter-update worker.
 *
 * Runs from the filter_config_work item queued by
 * ar9170_op_configure_filter().  Consumes the AR9170_FILTER_CHANGED_*
 * bits atomically (test_and_clear_bit) and pushes operating mode,
 * multicast hash and frame filter to the device under ar->mutex.
 * Bails out early if the device is no longer started.
 */
 2068static void ar9170_set_filters(struct work_struct *work)
 2069{
 2070	struct ar9170 *ar = container_of(work, struct ar9170,
 2071					 filter_config_work);
 2072	int err;
 2073
e9348cdd 2074	if (unlikely(!IS_STARTED(ar)))
32c1628f 2075		return ;
e9348cdd 2076
32c1628f 2077	mutex_lock(&ar->mutex);
864cc02e
CL
 2078	if (test_and_clear_bit(AR9170_FILTER_CHANGED_MODE,
 2079			       &ar->filter_changed)) {
e9348cdd
CL
 2080		err = ar9170_set_operating_mode(ar);
 2081		if (err)
 2082			goto unlock;
 2083	}
 2084
864cc02e
CL
 2085	if (test_and_clear_bit(AR9170_FILTER_CHANGED_MULTICAST,
 2086			       &ar->filter_changed)) {
e9348cdd
CL
 2087		err = ar9170_update_multicast(ar);
 2088		if (err)
 2089			goto unlock;
 2090	}
 2091
864cc02e
CL
 2092	if (test_and_clear_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
 2093			       &ar->filter_changed)) {
e9348cdd 2094		err = ar9170_update_frame_filter(ar);
864cc02e
CL
 2095		if (err)
 2096			goto unlock;
 2097	}
e9348cdd
CL
 2098
 2099unlock:
 2100	mutex_unlock(&ar->mutex);
 2101}
2102
/*
 * ar9170_op_configure_filter - mac80211 configure_filter() callback.
 *
 * Called in atomic context, so the actual device I/O is deferred:
 * this function only records the wanted multicast hash / frame filter /
 * sniffer mode, sets the matching AR9170_FILTER_CHANGED_* bits and
 * queues filter_config_work (handled by ar9170_set_filters()).
 */
 2103static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
 2104				       unsigned int changed_flags,
 2105				       unsigned int *new_flags,
 2106				       int mc_count, struct dev_mc_list *mclist)
 2107{
 2108	struct ar9170 *ar = hw->priv;
 2109
 2110	/* mask supported flags */
 2111	*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
cca84799
CL
 2112		      FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
 2113	ar->filter_state = *new_flags;
e9348cdd
CL
 2114	/*
 2115	 * We can support more by setting the sniffer bit and
 2116	 * then checking the error flags, later.
 2117	 */
 2118
 2119	if (changed_flags & FIF_ALLMULTI) {
 2120		if (*new_flags & FIF_ALLMULTI) {
 2121			ar->want_mc_hash = ~0ULL;
 2122		} else {
 2123			u64 mchash;
 2124			int i;
 2125
 2126			/* always get broadcast frames */
 2127			mchash = 1ULL << (0xff >> 2);
e9348cdd
CL
 2128
 2129			for (i = 0; i < mc_count; i++) {
 2130				if (WARN_ON(!mclist))
 2131					break;
				/* hash = top 6 bits of the last address octet */
 2132				mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
 2133				mclist = mclist->next;
 2134			}
 2135		ar->want_mc_hash = mchash;
 2136		}
864cc02e 2137		set_bit(AR9170_FILTER_CHANGED_MULTICAST, &ar->filter_changed);
e9348cdd
CL
 2138	}
 2139
 2140	if (changed_flags & FIF_CONTROL) {
 2141		u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
 2142			     AR9170_MAC_REG_FTF_RTS |
 2143			     AR9170_MAC_REG_FTF_CTS |
 2144			     AR9170_MAC_REG_FTF_ACK |
 2145			     AR9170_MAC_REG_FTF_CFE |
 2146			     AR9170_MAC_REG_FTF_CFE_ACK;
 2147
 2148		if (*new_flags & FIF_CONTROL)
 2149			ar->want_filter = ar->cur_filter | filter;
 2150		else
 2151			ar->want_filter = ar->cur_filter & ~filter;
 2152
864cc02e
CL
 2153		set_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
 2154			&ar->filter_changed);
e9348cdd
CL
 2155	}
 2156
 2157	if (changed_flags & FIF_PROMISC_IN_BSS) {
 2158		ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
864cc02e
CL
 2159		set_bit(AR9170_FILTER_CHANGED_MODE,
 2160			&ar->filter_changed);
e9348cdd
CL
 2161	}
 2162
 2163	if (likely(IS_STARTED(ar)))
42935eca 2164		ieee80211_queue_work(ar->hw, &ar->filter_config_work);
e9348cdd
CL
 2165}
2166
/*
 * mac80211 callback: push changed BSS parameters to the hardware.
 *
 * Each BSS_CHANGED_* bit is handled in turn; any register failure
 * aborts the remaining updates (the order of the hardware calls is
 * deliberate, e.g. the BSSID must be programmed before beacon work).
 */
static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN);
		err = ar9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) {
		/* upload the new beacon template, then (re)arm the timers */
		err = ar9170_update_beacon(ar);
		if (err)
			goto out;

		err = ar9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
#ifndef CONFIG_AR9170_LEDS
		/* enable assoc LED. */
		err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
	}

	if (changed & BSS_CHANGED_BEACON_INT) {
		err = ar9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = ar9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = ar9170_set_basic_rates(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
}
2227
2228static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
2229{
2230 struct ar9170 *ar = hw->priv;
2231 int err;
2232 u32 tsf_low;
2233 u32 tsf_high;
2234 u64 tsf;
2235
2236 mutex_lock(&ar->mutex);
2237 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
2238 if (!err)
2239 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
2240 mutex_unlock(&ar->mutex);
2241
2242 if (WARN_ON(err))
2243 return 0;
2244
2245 tsf = tsf_high;
2246 tsf = (tsf << 32) | tsf_low;
2247 return tsf;
2248}
2249
/*
 * mac80211 callback: install or remove a hardware crypto key.
 *
 * Key slot layout (as used by this function): slots 0..63 hold
 * pairwise keys (tracked in the ar->usedkeys bitmap), slots 64+ hold
 * group keys indexed by keyidx.  Returns 0 on success, -EOPNOTSUPP
 * when hardware offload is unavailable, -ENOSPC when all pairwise
 * slots are taken (RX falls back to software decryption in that case).
 */
static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	/* no interface up, or offload explicitly disabled -> software crypto */
	if ((!ar->vif) || (ar->disable_offload))
		return -EOPNOTSUPP;

	/* map the mac80211 algorithm to the firmware's cipher type */
	switch (key->alg) {
	case ALG_WEP:
		if (key->keylen == WLAN_KEY_LEN_WEP40)
			ktype = AR9170_ENC_ALG_WEP64;
		else
			ktype = AR9170_ENC_ALG_WEP128;
		break;
	case ALG_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case ALG_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (unlikely(!IS_STARTED(ar))) {
			err = -EOPNOTSUPP;
			goto out;
		}

		/* group keys need all-zeroes address */
		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
			sta = NULL;

		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			/* find a free pairwise key slot (0..63) */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64) {
				/* out of slots: decrypt this peer in software */
				ar->rx_software_decryption = true;
				ar9170_set_operating_mode(ar);
				err = -ENOSPC;
				goto out;
			}
		} else {
			/* group keys live in the slots above the bitmap */
			i = 64 + key->keyidx;
		}

		key->hw_key_idx = i;

		err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
					key->key, min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->alg == ALG_TKIP) {
			/* TKIP needs a second upload for the MIC key half */
			err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
						ktype, 1, key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating the MMIC
			 * for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (unlikely(!IS_STARTED(ar))) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		err = ar9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group key slots: clear by uploading a null key */
			err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
						AR9170_ENC_ALG_NONE, 0,
						NULL, 0);
			if (err)
				goto out;

			if (key->alg == ALG_TKIP) {
				err = ar9170_upload_key(ar, key->hw_key_idx,
							NULL,
							AR9170_ENC_ALG_NONE, 1,
							NULL, 0);
				if (err)
					goto out;
			}

		}
	}

	/* sync the pairwise-slot bitmap into the MAC's roll-call table */
	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

out:
	mutex_unlock(&ar->mutex);

	return err;
}
2370
2371static void ar9170_sta_notify(struct ieee80211_hw *hw,
2372 struct ieee80211_vif *vif,
2373 enum sta_notify_cmd cmd,
2374 struct ieee80211_sta *sta)
2375{
acbadf01
CL
2376 struct ar9170 *ar = hw->priv;
2377 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2378 unsigned int i;
2379
2380 switch (cmd) {
2381 case STA_NOTIFY_ADD:
2382 memset(sta_info, 0, sizeof(*sta_info));
2383
2384 if (!sta->ht_cap.ht_supported)
2385 break;
2386
2387 if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
2388 ar->global_ampdu_density = sta->ht_cap.ampdu_density;
2389
2390 if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
2391 ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
2392
2393 for (i = 0; i < AR9170_NUM_TID; i++) {
2394 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
2395 sta_info->agg[i].active = false;
2396 sta_info->agg[i].ssn = 0;
2397 sta_info->agg[i].retry = 0;
2398 sta_info->agg[i].tid = i;
2399 INIT_LIST_HEAD(&sta_info->agg[i].list);
2400 skb_queue_head_init(&sta_info->agg[i].queue);
2401 }
2402
2403 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
2404 break;
2405
2406 case STA_NOTIFY_REMOVE:
2407 if (!sta->ht_cap.ht_supported)
2408 break;
2409
2410 for (i = 0; i < AR9170_NUM_TID; i++) {
2411 sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
2412 skb_queue_purge(&sta_info->agg[i].queue);
2413 }
2414
2415 break;
2416
2417 default:
2418 break;
2419 }
2420
2421 if (IS_STARTED(ar) && ar->filter_changed)
42935eca 2422 ieee80211_queue_work(ar->hw, &ar->filter_config_work);
e9348cdd
CL
2423}
2424
2425static int ar9170_get_stats(struct ieee80211_hw *hw,
2426 struct ieee80211_low_level_stats *stats)
2427{
2428 struct ar9170 *ar = hw->priv;
2429 u32 val;
2430 int err;
2431
2432 mutex_lock(&ar->mutex);
2433 err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
2434 ar->stats.dot11ACKFailureCount += val;
2435
2436 memcpy(stats, &ar->stats, sizeof(*stats));
2437 mutex_unlock(&ar->mutex);
2438
2439 return 0;
2440}
2441
2442static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
2443 struct ieee80211_tx_queue_stats *tx_stats)
2444{
2445 struct ar9170 *ar = hw->priv;
2446
2447 spin_lock_bh(&ar->tx_stats_lock);
2448 memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
2449 spin_unlock_bh(&ar->tx_stats_lock);
2450
2451 return 0;
2452}
2453
2454static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
2455 const struct ieee80211_tx_queue_params *param)
2456{
2457 struct ar9170 *ar = hw->priv;
2458 int ret;
2459
2460 mutex_lock(&ar->mutex);
9b9c5aae 2461 if ((param) && !(queue > __AR9170_NUM_TXQ)) {
e9348cdd
CL
2462 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
2463 param, sizeof(*param));
2464
2465 ret = ar9170_set_qos(ar);
2466 } else
2467 ret = -EINVAL;
2468
2469 mutex_unlock(&ar->mutex);
2470 return ret;
2471}
2472
9e52b062
JB
/*
 * mac80211 callback: drive the TX A-MPDU aggregation state machine.
 *
 * Per-TID state lives in the station's drv_priv and is protected by
 * tx_ampdu_list_lock (taken irqsave since the TX path runs in softirq
 * context).  RX aggregation is handled entirely by the firmware.
 */
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
			       enum ieee80211_ampdu_mlme_action action,
			       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct ar9170 *ar = hw->priv;
	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
	unsigned long flags;

	if (!modparam_ht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		/* a session may only start from a clean SHUTDOWN state */
		if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
		    !list_empty(&tid_info->list)) {
			spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
			       "is in a very bad state!\n",
			       wiphy_name(hw->wiphy), sta->addr, tid);
#endif /* AR9170_TXAGG_DEBUG */
			return -EBUSY;
		}

		/* hand the starting sequence number back to mac80211 */
		*ssn = tid_info->ssn;
		tid_info->state = AR9170_TID_STATE_PROGRESS;
		tid_info->active = false;
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		/* tear down: unlink the TID and drop all queued frames */
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		tid_info->state = AR9170_TID_STATE_SHUTDOWN;
		list_del_init(&tid_info->list);
		tid_info->active = false;
		skb_queue_purge(&tid_info->queue);
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
		       wiphy_name(hw->wiphy), sta->addr, tid);
#endif /* AR9170_TXAGG_DEBUG */
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by firmware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
2537
e9348cdd
CL
/* mac80211 callback table for the AR9170 driver */
static const struct ieee80211_ops ar9170_ops = {
	.start			= ar9170_op_start,
	.stop			= ar9170_op_stop,
	.tx			= ar9170_op_tx,
	.add_interface		= ar9170_op_add_interface,
	.remove_interface	= ar9170_op_remove_interface,
	.config			= ar9170_op_config,
	.configure_filter	= ar9170_op_configure_filter,
	.conf_tx		= ar9170_conf_tx,
	.bss_info_changed	= ar9170_op_bss_info_changed,
	.get_tsf		= ar9170_op_get_tsf,
	.set_key		= ar9170_set_key,
	.sta_notify		= ar9170_sta_notify,
	.get_stats		= ar9170_get_stats,
	.get_tx_stats		= ar9170_get_tx_stats,
	.ampdu_action		= ar9170_ampdu_action,
};
2555
/*
 * Allocate and pre-initialize the shared driver state.
 *
 * priv_size is the size of the transport-specific private structure
 * (which embeds struct ar9170); the transport layer (e.g. USB glue)
 * calls this before it knows anything about the actual device.
 * Returns the driver context, or ERR_PTR(-ENOMEM) on failure.
 */
void *ar9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into seperate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	/* locks, queues and deferred work used by the TX/RX paths */
	mutex_init(&ar->mutex);
	spin_lock_init(&ar->cmdlock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	skb_queue_head_init(&ar->tx_status_ampdu);
	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	ar9170_rx_reset_rx_mpdu(ar);
	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);

	/* all hw supports 2.4 GHz, so set channel to 1 by default */
	ar->channel = &ar9170_2ghz_chantable[0];

	/* first part of wiphy init */
	ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_WDS) |
					 BIT(NL80211_IFTYPE_ADHOC);
	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			 IEEE80211_HW_SIGNAL_DBM |
			 IEEE80211_HW_NOISE_DBM;

	if (modparam_ht) {
		ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	} else {
		/* no HT module parameter: advertise legacy-only bands */
		ar9170_band_2GHz.ht_cap.ht_supported = false;
		ar9170_band_5GHz.ht_cap.ht_supported = false;
	}

	ar->hw->queues = __AR9170_NUM_TXQ;
	ar->hw->extra_tx_headroom = 8;
	ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);

	ar->hw->max_rates = 1;
	ar->hw->max_rate_tries = 3;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	return ar;

err_nomem:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
e9348cdd
CL
2631
2632static int ar9170_read_eeprom(struct ar9170 *ar)
2633{
2634#define RW 8 /* number of words to read at once */
2635#define RB (sizeof(u32) * RW)
e9348cdd
CL
2636 u8 *eeprom = (void *)&ar->eeprom;
2637 u8 *addr = ar->eeprom.mac_address;
2638 __le32 offsets[RW];
acbadf01 2639 unsigned int rx_streams, tx_streams, tx_params = 0;
e9348cdd
CL
2640 int i, j, err, bands = 0;
2641
2642 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
2643
2644 BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
2645#ifndef __CHECKER__
2646 /* don't want to handle trailing remains */
2647 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
2648#endif
2649
2650 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
2651 for (j = 0; j < RW; j++)
2652 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
2653 RB * i + 4 * j);
2654
2655 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
2656 RB, (u8 *) &offsets,
2657 RB, eeprom + RB * i);
2658 if (err)
2659 return err;
2660 }
2661
2662#undef RW
2663#undef RB
2664
2665 if (ar->eeprom.length == cpu_to_le16(0xFFFF))
2666 return -ENODATA;
2667
2668 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
2669 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
2670 bands++;
2671 }
2672 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
2673 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
2674 bands++;
2675 }
acbadf01
CL
2676
2677 rx_streams = hweight8(ar->eeprom.rx_mask);
2678 tx_streams = hweight8(ar->eeprom.tx_mask);
2679
2680 if (rx_streams != tx_streams)
2681 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
2682
2683 if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)
2684 tx_params = (tx_streams - 1) <<
2685 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2686
2687 ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
2688 ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
2689
e9348cdd
CL
2690 /*
2691 * I measured this, a bandswitch takes roughly
2692 * 135 ms and a frequency switch about 80.
2693 *
2694 * FIXME: measure these values again once EEPROM settings
2695 * are used, that will influence them!
2696 */
2697 if (bands == 2)
2698 ar->hw->channel_change_time = 135 * 1000;
2699 else
2700 ar->hw->channel_change_time = 80 * 1000;
2701
1878f77e
CL
2702 ar->regulatory.current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
2703 ar->regulatory.current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
2704
e9348cdd
CL
2705 /* second part of wiphy init */
2706 SET_IEEE80211_PERM_ADDR(ar->hw, addr);
2707
2708 return bands ? 0 : -EINVAL;
2709}
2710
1878f77e
CL
2711static int ar9170_reg_notifier(struct wiphy *wiphy,
2712 struct regulatory_request *request)
2713{
2714 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
2715 struct ar9170 *ar = hw->priv;
2716
2717 return ath_reg_notifier_apply(wiphy, request, &ar->regulatory);
2718}
2719
e9348cdd
CL
/*
 * Final registration step, called by the transport layer once the
 * device is reachable: read the EEPROM, set up regulatory state,
 * register with mac80211 and bring up the LEDs.
 * Returns 0 on success, a negative errno otherwise.
 */
int ar9170_register(struct ar9170 *ar, struct device *pdev)
{
	int err;

	/* try to read EEPROM, init MAC addr */
	err = ar9170_read_eeprom(ar);
	if (err)
		goto err_out;

	err = ath_regd_init(&ar->regulatory, ar->hw->wiphy,
			    ar9170_reg_notifier);
	if (err)
		goto err_out;

	err = ieee80211_register_hw(ar->hw);
	if (err)
		goto err_out;

	/* only hint the regulatory core for device-specific domains */
	if (!ath_is_world_regd(&ar->regulatory))
		regulatory_hint(ar->hw->wiphy, ar->regulatory.alpha2);

	err = ar9170_init_leds(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_AR9170_LEDS
	err = ar9170_register_leds(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_AR9170_LEDS */

	dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return err;

err_unreg:
	/* LED setup failed after mac80211 registration: roll it back */
	ieee80211_unregister_hw(ar->hw);

err_out:
	return err;
}
e9348cdd
CL
2762
/*
 * Tear down everything ar9170_register()/ar9170_alloc() set up:
 * LEDs, the RX failover buffer, the mac80211 registration and the
 * driver mutex.  The ieee80211_hw itself is freed by the caller.
 */
void ar9170_unregister(struct ar9170 *ar)
{
#ifdef CONFIG_AR9170_LEDS
	ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */

	kfree_skb(ar->rx_failover);
	ieee80211_unregister_hw(ar->hw);
	mutex_destroy(&ar->mutex);
}