drivers/net/wireless/ath/carl9170/main.c
1 /*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
50
51 static bool modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54
55 int modparam_noht;
56 module_param_named(noht, modparam_noht, int, S_IRUGO);
57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58
59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
60 .bitrate = (_bitrate), \
61 .flags = (_flags), \
62 .hw_value = (_hw_rate) | (_txpidx) << 4, \
63 }
64
65 struct ieee80211_rate __carl9170_ratetable[] = {
66 RATE(10, 0, 0, 0),
67 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
70 RATE(60, 0xb, 0, 0),
71 RATE(90, 0xf, 0, 0),
72 RATE(120, 0xa, 0, 0),
73 RATE(180, 0xe, 0, 0),
74 RATE(240, 0x9, 0, 0),
75 RATE(360, 0xd, 1, 0),
76 RATE(480, 0x8, 2, 0),
77 RATE(540, 0xc, 3, 0),
78 };
79 #undef RATE
80
81 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
82 #define carl9170_g_ratetable_size 12
83 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
84 #define carl9170_a_ratetable_size 8
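/*
 * For example, RATE(360, 0xd, 1, 0) yields hw_value = 0xd | (1 << 4) = 0x1d:
 * the low nibble carries the PHY rate code (_hw_rate) and the high nibble
 * the _txpidx value. carl9170_g_ratetable covers all 12 entries (CCK plus
 * OFDM), while carl9170_a_ratetable skips the first four CCK rates.
 */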
85
86 /*
87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88 * array in phy.c so that we don't have to do frequency lookups!
89 */
90 #define CHAN(_freq, _idx) { \
91 .center_freq = (_freq), \
92 .hw_value = (_idx), \
93 .max_power = 18, /* XXX */ \
94 }
95
96 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97 CHAN(2412, 0),
98 CHAN(2417, 1),
99 CHAN(2422, 2),
100 CHAN(2427, 3),
101 CHAN(2432, 4),
102 CHAN(2437, 5),
103 CHAN(2442, 6),
104 CHAN(2447, 7),
105 CHAN(2452, 8),
106 CHAN(2457, 9),
107 CHAN(2462, 10),
108 CHAN(2467, 11),
109 CHAN(2472, 12),
110 CHAN(2484, 13),
111 };
112
113 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114 CHAN(4920, 14),
115 CHAN(4940, 15),
116 CHAN(4960, 16),
117 CHAN(4980, 17),
118 CHAN(5040, 18),
119 CHAN(5060, 19),
120 CHAN(5080, 20),
121 CHAN(5180, 21),
122 CHAN(5200, 22),
123 CHAN(5220, 23),
124 CHAN(5240, 24),
125 CHAN(5260, 25),
126 CHAN(5280, 26),
127 CHAN(5300, 27),
128 CHAN(5320, 28),
129 CHAN(5500, 29),
130 CHAN(5520, 30),
131 CHAN(5540, 31),
132 CHAN(5560, 32),
133 CHAN(5580, 33),
134 CHAN(5600, 34),
135 CHAN(5620, 35),
136 CHAN(5640, 36),
137 CHAN(5660, 37),
138 CHAN(5680, 38),
139 CHAN(5700, 39),
140 CHAN(5745, 40),
141 CHAN(5765, 41),
142 CHAN(5785, 42),
143 CHAN(5805, 43),
144 CHAN(5825, 44),
145 CHAN(5170, 45),
146 CHAN(5190, 46),
147 CHAN(5210, 47),
148 CHAN(5230, 48),
149 };
150 #undef CHAN
151
152 #define CARL9170_HT_CAP \
153 { \
154 .ht_supported = true, \
155 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
156 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
157 IEEE80211_HT_CAP_SGI_40 | \
158 IEEE80211_HT_CAP_DSSSCCK40 | \
159 IEEE80211_HT_CAP_SM_PS, \
160 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
161 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
162 .mcs = { \
163 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
164 .rx_highest = cpu_to_le16(300), \
165 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
166 }, \
167 }
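/*
 * The rx_mask above advertises MCS 0-15 (two spatial streams) plus MCS 32;
 * rx_highest = 300 Mbit/s matches MCS 15 at 40 MHz with a short guard
 * interval. ampdu_factor/ampdu_density select a 64 KiB maximum A-MPDU with
 * 8 us minimum MPDU spacing.
 */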
168
169 static struct ieee80211_supported_band carl9170_band_2GHz = {
170 .channels = carl9170_2ghz_chantable,
171 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
172 .bitrates = carl9170_g_ratetable,
173 .n_bitrates = carl9170_g_ratetable_size,
174 .ht_cap = CARL9170_HT_CAP,
175 };
176
177 static struct ieee80211_supported_band carl9170_band_5GHz = {
178 .channels = carl9170_5ghz_chantable,
179 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
180 .bitrates = carl9170_a_ratetable,
181 .n_bitrates = carl9170_a_ratetable_size,
182 .ht_cap = CARL9170_HT_CAP,
183 };
184
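/*
 * Garbage-collect TID structs that have reached the SHUTDOWN state: they are
 * unlinked from the RCU-protected tx_ampdu_list under the list lock,
 * collected on a private list, and only freed after synchronize_rcu()
 * guarantees that no reader can still hold a reference. Any frames left on
 * a TID queue are completed with a failed tx status.
 */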
185 static void carl9170_ampdu_gc(struct ar9170 *ar)
186 {
187 struct carl9170_sta_tid *tid_info;
188 LIST_HEAD(tid_gc);
189
190 rcu_read_lock();
191 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192 spin_lock_bh(&ar->tx_ampdu_list_lock);
193 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194 tid_info->state = CARL9170_TID_STATE_KILLED;
195 list_del_rcu(&tid_info->list);
196 ar->tx_ampdu_list_len--;
197 list_add_tail(&tid_info->tmp_list, &tid_gc);
198 }
199 spin_unlock_bh(&ar->tx_ampdu_list_lock);
200
201 }
202 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203 rcu_read_unlock();
204
205 synchronize_rcu();
206
207 while (!list_empty(&tid_gc)) {
208 struct sk_buff *skb;
209 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210 tmp_list);
211
212 while ((skb = __skb_dequeue(&tid_info->queue)))
213 carl9170_tx_status(ar, skb, false);
214
215 list_del_init(&tid_info->tmp_list);
216 kfree(tid_info);
217 }
218 }
219
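/*
 * Optionally drop everything that is still waiting in the driver's
 * tx_pending queues, then give frames that have already been handed to the
 * device up to one second (HZ jiffies) to complete or time out before
 * warning.
 */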
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 {
222 if (drop_queued) {
223 int i;
224
225 /*
226 * We can only drop frames which have not been uploaded
227 * to the device yet.
228 */
229
230 for (i = 0; i < ar->hw->queues; i++) {
231 struct sk_buff *skb;
232
233 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234 struct ieee80211_tx_info *info;
235
236 info = IEEE80211_SKB_CB(skb);
237 if (info->flags & IEEE80211_TX_CTL_AMPDU)
238 atomic_dec(&ar->tx_ampdu_upload);
239
240 carl9170_tx_status(ar, skb, false);
241 }
242 }
243 }
244
245 /* Wait for all other outstanding frames to time out. */
246 if (atomic_read(&ar->tx_total_queued))
247 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
248 }
249
250 static void carl9170_flush_ba(struct ar9170 *ar)
251 {
252 struct sk_buff_head free;
253 struct carl9170_sta_tid *tid_info;
254 struct sk_buff *skb;
255
256 __skb_queue_head_init(&free);
257
258 rcu_read_lock();
259 spin_lock_bh(&ar->tx_ampdu_list_lock);
260 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
261 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
262 tid_info->state = CARL9170_TID_STATE_SUSPEND;
263
264 spin_lock(&tid_info->lock);
265 while ((skb = __skb_dequeue(&tid_info->queue)))
266 __skb_queue_tail(&free, skb);
267 spin_unlock(&tid_info->lock);
268 }
269 }
270 spin_unlock_bh(&ar->tx_ampdu_list_lock);
271 rcu_read_unlock();
272
273 while ((skb = __skb_dequeue(&free)))
274 carl9170_tx_status(ar, skb, false);
275 }
276
277 static void carl9170_zap_queues(struct ar9170 *ar)
278 {
279 struct carl9170_vif_info *cvif;
280 unsigned int i;
281
282 carl9170_ampdu_gc(ar);
283
284 carl9170_flush_ba(ar);
285 carl9170_flush(ar, true);
286
287 for (i = 0; i < ar->hw->queues; i++) {
288 spin_lock_bh(&ar->tx_status[i].lock);
289 while (!skb_queue_empty(&ar->tx_status[i])) {
290 struct sk_buff *skb;
291
292 skb = skb_peek(&ar->tx_status[i]);
293 carl9170_tx_get_skb(skb);
294 spin_unlock_bh(&ar->tx_status[i].lock);
295 carl9170_tx_drop(ar, skb);
296 spin_lock_bh(&ar->tx_status[i].lock);
297 carl9170_tx_put_skb(skb);
298 }
299 spin_unlock_bh(&ar->tx_status[i].lock);
300 }
301
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
304 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
305
306 /* reinitialize queues statistics */
307 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
308 for (i = 0; i < ar->hw->queues; i++)
309 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
310
311 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
312 ar->mem_bitmap[i] = 0;
313
314 rcu_read_lock();
315 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
316 spin_lock_bh(&ar->beacon_lock);
317 dev_kfree_skb_any(cvif->beacon);
318 cvif->beacon = NULL;
319 spin_unlock_bh(&ar->beacon_lock);
320 }
321 rcu_read_unlock();
322
323 atomic_set(&ar->tx_ampdu_upload, 0);
324 atomic_set(&ar->tx_ampdu_scheduler, 0);
325 atomic_set(&ar->tx_total_pending, 0);
326 atomic_set(&ar->tx_total_queued, 0);
327 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
328 }
329
330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
331 do { \
332 queue.aifs = ai_fs; \
333 queue.cw_min = cwmin; \
334 queue.cw_max = cwmax; \
335 queue.txop = _txop; \
336 } while (0)
337
338 static int carl9170_op_start(struct ieee80211_hw *hw)
339 {
340 struct ar9170 *ar = hw->priv;
341 int err, i;
342
343 mutex_lock(&ar->mutex);
344
345 carl9170_zap_queues(ar);
346
347 /* reset QoS defaults */
348 CARL9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT */
349 CARL9170_FILL_QUEUE(ar->edcf[1], 2, 7, 15, 94); /* VIDEO */
350 CARL9170_FILL_QUEUE(ar->edcf[2], 2, 3, 7, 47); /* VOICE */
351 CARL9170_FILL_QUEUE(ar->edcf[3], 7, 15, 1023, 0); /* BACKGROUND */
352 CARL9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
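/*
 * The columns are AIFS, CWmin, CWmax and TXOP; mac80211 specifies the TXOP
 * in units of 32 us, so the 94 and 47 above correspond to roughly 3.0 ms
 * and 1.5 ms of burst time for the video and voice queues.
 */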
353
354 ar->current_factor = ar->current_density = -1;
355 /* "The first key is unique." */
356 ar->usedkeys = 1;
357 ar->filter_state = 0;
358 ar->ps.last_action = jiffies;
359 ar->ps.last_slept = jiffies;
360 ar->erp_mode = CARL9170_ERP_AUTO;
361 ar->rx_software_decryption = false;
362 ar->disable_offload = false;
363
364 for (i = 0; i < ar->hw->queues; i++) {
365 ar->queue_stop_timeout[i] = jiffies;
366 ar->max_queue_stop_timeout[i] = 0;
367 }
368
369 atomic_set(&ar->mem_allocs, 0);
370
371 err = carl9170_usb_open(ar);
372 if (err)
373 goto out;
374
375 err = carl9170_init_mac(ar);
376 if (err)
377 goto out;
378
379 err = carl9170_set_qos(ar);
380 if (err)
381 goto out;
382
383 if (ar->fw.rx_filter) {
384 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
385 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
386 if (err)
387 goto out;
388 }
389
390 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
391 AR9170_DMA_TRIGGER_RXQ);
392 if (err)
393 goto out;
394
395 /* Clear key-cache */
396 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
397 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
398 0, NULL, 0);
399 if (err)
400 goto out;
401
402 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
403 1, NULL, 0);
404 if (err)
405 goto out;
406
407 if (i < AR9170_CAM_MAX_USER) {
408 err = carl9170_disable_key(ar, i);
409 if (err)
410 goto out;
411 }
412 }
413
414 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
415
416 ieee80211_wake_queues(ar->hw);
417 err = 0;
418
419 out:
420 mutex_unlock(&ar->mutex);
421 return err;
422 }
423
424 static void carl9170_cancel_worker(struct ar9170 *ar)
425 {
426 cancel_delayed_work_sync(&ar->tx_janitor);
427 #ifdef CONFIG_CARL9170_LEDS
428 cancel_delayed_work_sync(&ar->led_work);
429 #endif /* CONFIG_CARL9170_LEDS */
430 cancel_work_sync(&ar->ps_work);
431 cancel_work_sync(&ar->ampdu_work);
432 }
433
434 static void carl9170_op_stop(struct ieee80211_hw *hw)
435 {
436 struct ar9170 *ar = hw->priv;
437
438 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
439
440 ieee80211_stop_queues(ar->hw);
441
442 mutex_lock(&ar->mutex);
443 if (IS_ACCEPTING_CMD(ar)) {
444 rcu_assign_pointer(ar->beacon_iter, NULL);
445
446 carl9170_led_set_state(ar, 0);
447
448 /* stop DMA */
449 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
450 carl9170_usb_stop(ar);
451 }
452
453 carl9170_zap_queues(ar);
454 mutex_unlock(&ar->mutex);
455
456 carl9170_cancel_worker(ar);
457 }
458
459 static void carl9170_restart_work(struct work_struct *work)
460 {
461 struct ar9170 *ar = container_of(work, struct ar9170,
462 restart_work);
463 int err;
464
465 ar->usedkeys = 0;
466 ar->filter_state = 0;
467 carl9170_cancel_worker(ar);
468
469 mutex_lock(&ar->mutex);
470 err = carl9170_usb_restart(ar);
471 if (net_ratelimit()) {
472 if (err) {
473 			dev_err(&ar->udev->dev, "Failed to restart device "
474 				"(%d).\n", err);
475 } else {
476 dev_info(&ar->udev->dev, "device restarted "
477 "successfully.\n");
478 }
479 }
480
481 carl9170_zap_queues(ar);
482 mutex_unlock(&ar->mutex);
483 if (!err) {
484 ar->restart_counter++;
485 atomic_set(&ar->pending_restarts, 0);
486
487 ieee80211_restart_hw(ar->hw);
488 } else {
489 /*
490 * The reset was unsuccessful and the device seems to
491 * be dead. But there's still one option: a low-level
492 * usb subsystem reset...
493 */
494
495 carl9170_usb_reset(ar);
496 }
497 }
498
499 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
500 {
501 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
502
503 /*
504 * Sometimes, an error can trigger several different reset events.
505 * By ignoring these *surplus* reset events, the device won't be
506 * killed again, right after it has recovered.
507 */
508 if (atomic_inc_return(&ar->pending_restarts) > 1) {
509 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
510 return;
511 }
512
513 ieee80211_stop_queues(ar->hw);
514
515 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
516
517 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
518 !WARN_ON(r >= __CARL9170_RR_LAST))
519 ar->last_reason = r;
520
521 if (!ar->registered)
522 return;
523
524 if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
525 ieee80211_queue_work(ar->hw, &ar->restart_work);
526 else
527 carl9170_usb_reset(ar);
528
529 /*
530 	 * At this point, the device instance might have vanished or been
531 	 * disabled. So, don't add any code here that accesses the ar9170
532 	 * struct without proper protection.
533 */
534 }
535
536 static int carl9170_init_interface(struct ar9170 *ar,
537 struct ieee80211_vif *vif)
538 {
539 struct ath_common *common = &ar->common;
540 int err;
541
542 if (!vif) {
543 WARN_ON_ONCE(IS_STARTED(ar));
544 return 0;
545 }
546
547 memcpy(common->macaddr, vif->addr, ETH_ALEN);
548
549 if (modparam_nohwcrypt ||
550 ((vif->type != NL80211_IFTYPE_STATION) &&
551 (vif->type != NL80211_IFTYPE_AP))) {
552 ar->rx_software_decryption = true;
553 ar->disable_offload = true;
554 }
555
556 err = carl9170_set_operating_mode(ar);
557 return err;
558 }
559
560 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
561 struct ieee80211_vif *vif)
562 {
563 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
564 struct ieee80211_vif *main_vif;
565 struct ar9170 *ar = hw->priv;
566 int vif_id = -1, err = 0;
567
568 mutex_lock(&ar->mutex);
569 rcu_read_lock();
570 if (vif_priv->active) {
571 /*
572 		 * Skip the interface structure initialization if the
573 		 * vif survived the _restart call.
574 */
575 vif_id = vif_priv->id;
576 vif_priv->enable_beacon = false;
577
578 spin_lock_bh(&ar->beacon_lock);
579 dev_kfree_skb_any(vif_priv->beacon);
580 vif_priv->beacon = NULL;
581 spin_unlock_bh(&ar->beacon_lock);
582
583 goto init;
584 }
585
586 main_vif = carl9170_get_main_vif(ar);
587
588 if (main_vif) {
589 switch (main_vif->type) {
590 case NL80211_IFTYPE_STATION:
591 if (vif->type == NL80211_IFTYPE_STATION)
592 break;
593
594 err = -EBUSY;
595 rcu_read_unlock();
596
597 goto unlock;
598
599 case NL80211_IFTYPE_AP:
600 if ((vif->type == NL80211_IFTYPE_STATION) ||
601 (vif->type == NL80211_IFTYPE_WDS) ||
602 (vif->type == NL80211_IFTYPE_AP))
603 break;
604
605 err = -EBUSY;
606 rcu_read_unlock();
607 goto unlock;
608
609 default:
610 rcu_read_unlock();
611 goto unlock;
612 }
613 }
614
615 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
616
617 if (vif_id < 0) {
618 rcu_read_unlock();
619
620 err = -ENOSPC;
621 goto unlock;
622 }
623
624 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
625
626 vif_priv->active = true;
627 vif_priv->id = vif_id;
628 vif_priv->enable_beacon = false;
629 ar->vifs++;
630 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
631 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
632
633 init:
634 if (carl9170_get_main_vif(ar) == vif) {
635 rcu_assign_pointer(ar->beacon_iter, vif_priv);
636 rcu_read_unlock();
637
638 err = carl9170_init_interface(ar, vif);
639 if (err)
640 goto unlock;
641 } else {
642 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
643 rcu_read_unlock();
644
645 if (err)
646 goto unlock;
647 }
648
649 unlock:
650 if (err && (vif_id != -1)) {
651 vif_priv->active = false;
652 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
653 ar->vifs--;
654 rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
655 list_del_rcu(&vif_priv->list);
656 mutex_unlock(&ar->mutex);
657 synchronize_rcu();
658 } else {
659 if (ar->vifs > 1)
660 ar->ps.off_override |= PS_OFF_VIF;
661
662 mutex_unlock(&ar->mutex);
663 }
664
665 return err;
666 }
667
668 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
669 struct ieee80211_vif *vif)
670 {
671 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
672 struct ieee80211_vif *main_vif;
673 struct ar9170 *ar = hw->priv;
674 unsigned int id;
675
676 mutex_lock(&ar->mutex);
677
678 if (WARN_ON_ONCE(!vif_priv->active))
679 goto unlock;
680
681 ar->vifs--;
682
683 rcu_read_lock();
684 main_vif = carl9170_get_main_vif(ar);
685
686 id = vif_priv->id;
687
688 vif_priv->active = false;
689 WARN_ON(vif_priv->enable_beacon);
690 vif_priv->enable_beacon = false;
691 list_del_rcu(&vif_priv->list);
692 rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
693
694 if (vif == main_vif) {
695 rcu_read_unlock();
696
697 if (ar->vifs) {
698 WARN_ON(carl9170_init_interface(ar,
699 carl9170_get_main_vif(ar)));
700 } else {
701 carl9170_set_operating_mode(ar);
702 }
703 } else {
704 rcu_read_unlock();
705
706 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
707 }
708
709 carl9170_update_beacon(ar, false);
710 carl9170_flush_cab(ar, id);
711
712 spin_lock_bh(&ar->beacon_lock);
713 dev_kfree_skb_any(vif_priv->beacon);
714 vif_priv->beacon = NULL;
715 spin_unlock_bh(&ar->beacon_lock);
716
717 bitmap_release_region(&ar->vif_bitmap, id, 0);
718
719 carl9170_set_beacon_timers(ar);
720
721 if (ar->vifs == 1)
722 ar->ps.off_override &= ~PS_OFF_VIF;
723
724 unlock:
725 mutex_unlock(&ar->mutex);
726
727 synchronize_rcu();
728 }
729
730 void carl9170_ps_check(struct ar9170 *ar)
731 {
732 ieee80211_queue_work(ar->hw, &ar->ps_work);
733 }
734
735 /* caller must hold ar->mutex */
736 static int carl9170_ps_update(struct ar9170 *ar)
737 {
738 bool ps = false;
739 int err = 0;
740
741 if (!ar->ps.off_override)
742 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
743
744 if (ps != ar->ps.state) {
745 err = carl9170_powersave(ar, ps);
746 if (err)
747 return err;
748
749 if (ar->ps.state && !ps) {
750 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
751 ar->ps.last_action);
752 }
753
754 if (ps)
755 ar->ps.last_slept = jiffies;
756
757 ar->ps.last_action = jiffies;
758 ar->ps.state = ps;
759 }
760
761 return 0;
762 }
763
764 static void carl9170_ps_work(struct work_struct *work)
765 {
766 struct ar9170 *ar = container_of(work, struct ar9170,
767 ps_work);
768 mutex_lock(&ar->mutex);
769 if (IS_STARTED(ar))
770 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
771 mutex_unlock(&ar->mutex);
772 }
773
774
775 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
776 {
777 struct ar9170 *ar = hw->priv;
778 int err = 0;
779
780 mutex_lock(&ar->mutex);
781 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
782 /* TODO */
783 err = 0;
784 }
785
786 if (changed & IEEE80211_CONF_CHANGE_PS) {
787 err = carl9170_ps_update(ar);
788 if (err)
789 goto out;
790 }
791
792 if (changed & IEEE80211_CONF_CHANGE_POWER) {
793 /* TODO */
794 err = 0;
795 }
796
797 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
798 /* TODO */
799 err = 0;
800 }
801
802 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
803 /* adjust slot time for 5 GHz */
804 err = carl9170_set_slot_time(ar);
805 if (err)
806 goto out;
807
808 err = carl9170_set_channel(ar, hw->conf.channel,
809 hw->conf.channel_type, CARL9170_RFI_NONE);
810 if (err)
811 goto out;
812
813 err = carl9170_set_dyn_sifs_ack(ar);
814 if (err)
815 goto out;
816
817 err = carl9170_set_rts_cts_rate(ar);
818 if (err)
819 goto out;
820 }
821
822 out:
823 mutex_unlock(&ar->mutex);
824 return err;
825 }
826
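/*
 * The 64-bit multicast hash uses the upper six bits of the last address
 * octet as the bit index. For example, the broadcast address
 * ff:ff:ff:ff:ff:ff maps to bit 0xff >> 2 = 63, which is why that bit is
 * always set below.
 */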
827 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
828 struct netdev_hw_addr_list *mc_list)
829 {
830 struct netdev_hw_addr *ha;
831 u64 mchash;
832
833 /* always get broadcast frames */
834 mchash = 1ULL << (0xff >> 2);
835
836 netdev_hw_addr_list_for_each(ha, mc_list)
837 mchash |= 1ULL << (ha->addr[5] >> 2);
838
839 return mchash;
840 }
841
842 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
843 unsigned int changed_flags,
844 unsigned int *new_flags,
845 u64 multicast)
846 {
847 struct ar9170 *ar = hw->priv;
848
849 /* mask supported flags */
850 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
851
852 if (!IS_ACCEPTING_CMD(ar))
853 return;
854
855 mutex_lock(&ar->mutex);
856
857 ar->filter_state = *new_flags;
858 /*
859 * We can support more by setting the sniffer bit and
860 	 * then checking the error flags later.
861 */
862
863 if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
864 multicast = ~0ULL;
865
866 if (multicast != ar->cur_mc_hash)
867 WARN_ON(carl9170_update_multicast(ar, multicast));
868
869 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
870 ar->sniffer_enabled = !!(*new_flags &
871 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
872
873 WARN_ON(carl9170_set_operating_mode(ar));
874 }
875
876 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
877 u32 rx_filter = 0;
878
879 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
880 rx_filter |= CARL9170_RX_FILTER_BAD;
881
882 if (!(*new_flags & FIF_CONTROL))
883 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
884
885 if (!(*new_flags & FIF_PSPOLL))
886 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
887
888 if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
889 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
890 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
891 }
892
893 WARN_ON(carl9170_rx_filter(ar, rx_filter));
894 }
895
896 mutex_unlock(&ar->mutex);
897 }
898
899
900 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
901 struct ieee80211_vif *vif,
902 struct ieee80211_bss_conf *bss_conf,
903 u32 changed)
904 {
905 struct ar9170 *ar = hw->priv;
906 struct ath_common *common = &ar->common;
907 int err = 0;
908 struct carl9170_vif_info *vif_priv;
909 struct ieee80211_vif *main_vif;
910
911 mutex_lock(&ar->mutex);
912 vif_priv = (void *) vif->drv_priv;
913 main_vif = carl9170_get_main_vif(ar);
914 if (WARN_ON(!main_vif))
915 goto out;
916
917 if (changed & BSS_CHANGED_BEACON_ENABLED) {
918 struct carl9170_vif_info *iter;
919 int i = 0;
920
921 vif_priv->enable_beacon = bss_conf->enable_beacon;
922 rcu_read_lock();
923 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
924 if (iter->active && iter->enable_beacon)
925 i++;
926
927 }
928 rcu_read_unlock();
929
930 ar->beacon_enabled = i;
931 }
932
933 if (changed & BSS_CHANGED_BEACON) {
934 err = carl9170_update_beacon(ar, false);
935 if (err)
936 goto out;
937 }
938
939 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
940 BSS_CHANGED_BEACON_INT)) {
941
942 if (main_vif != vif) {
943 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
944 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
945 }
946
947 /*
948 		 * A hard limit on the beacon/DTIM interval keeps queued
949 		 * broadcast (CAB) traffic from triggering false stuck-queue alarms.
950 */
951 if (vif->type != NL80211_IFTYPE_STATION &&
952 (bss_conf->beacon_int * bss_conf->dtim_period >=
953 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
954 err = -EINVAL;
955 goto out;
956 }
957
958 err = carl9170_set_beacon_timers(ar);
959 if (err)
960 goto out;
961 }
962
963 if (changed & BSS_CHANGED_HT) {
964 /* TODO */
965 err = 0;
966 if (err)
967 goto out;
968 }
969
970 if (main_vif != vif)
971 goto out;
972
973 /*
974 * The following settings can only be changed by the
975 * master interface.
976 */
977
978 if (changed & BSS_CHANGED_BSSID) {
979 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
980 err = carl9170_set_operating_mode(ar);
981 if (err)
982 goto out;
983 }
984
985 if (changed & BSS_CHANGED_ASSOC) {
986 ar->common.curaid = bss_conf->aid;
987 err = carl9170_set_beacon_timers(ar);
988 if (err)
989 goto out;
990 }
991
992 if (changed & BSS_CHANGED_ERP_SLOT) {
993 err = carl9170_set_slot_time(ar);
994 if (err)
995 goto out;
996 }
997
998 if (changed & BSS_CHANGED_BASIC_RATES) {
999 err = carl9170_set_mac_rates(ar);
1000 if (err)
1001 goto out;
1002 }
1003
1004 out:
1005 WARN_ON_ONCE(err && IS_STARTED(ar));
1006 mutex_unlock(&ar->mutex);
1007 }
1008
1009 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw)
1010 {
1011 struct ar9170 *ar = hw->priv;
1012 struct carl9170_tsf_rsp tsf;
1013 int err;
1014
1015 mutex_lock(&ar->mutex);
1016 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1017 0, NULL, sizeof(tsf), &tsf);
1018 mutex_unlock(&ar->mutex);
1019 if (WARN_ON(err))
1020 return 0;
1021
1022 return le64_to_cpu(tsf.tsf_64);
1023 }
1024
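/*
 * Hardware key cache layout, as used below: pairwise keys occupy the 64
 * per-station slots tracked in the ar->usedkeys bitmap, while group keys
 * are addressed as slot 64 + keyidx. TKIP needs a second upload for the
 * MIC key material (the extra upload of key->key + 16 below).
 */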
1025 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1026 struct ieee80211_vif *vif,
1027 struct ieee80211_sta *sta,
1028 struct ieee80211_key_conf *key)
1029 {
1030 struct ar9170 *ar = hw->priv;
1031 int err = 0, i;
1032 u8 ktype;
1033
1034 if (ar->disable_offload || !vif)
1035 return -EOPNOTSUPP;
1036
1037 /*
1038 	 * We have to fall back to software encryption whenever
1039 	 * the user chooses to participate in an IBSS or is connected
1040 	 * to more than one network.
1041 	 *
1042 	 * This is very unfortunate, because some machines cannot handle
1043 	 * the high throughput speed of 802.11n networks.
1044 */
1045
1046 if (!is_main_vif(ar, vif))
1047 goto err_softw;
1048
1049 /*
1050 	 * While the hardware supports a *catch-all* key for offloading
1051 	 * group-key en-/de-cryption, the way the hardware decides which
1052 	 * keyId maps to which key remains a mystery...
1053 */
1054 if ((vif->type != NL80211_IFTYPE_STATION &&
1055 vif->type != NL80211_IFTYPE_ADHOC) &&
1056 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1057 return -EOPNOTSUPP;
1058
1059 switch (key->cipher) {
1060 case WLAN_CIPHER_SUITE_WEP40:
1061 ktype = AR9170_ENC_ALG_WEP64;
1062 break;
1063 case WLAN_CIPHER_SUITE_WEP104:
1064 ktype = AR9170_ENC_ALG_WEP128;
1065 break;
1066 case WLAN_CIPHER_SUITE_TKIP:
1067 ktype = AR9170_ENC_ALG_TKIP;
1068 break;
1069 case WLAN_CIPHER_SUITE_CCMP:
1070 ktype = AR9170_ENC_ALG_AESCCMP;
1071 break;
1072 default:
1073 return -EOPNOTSUPP;
1074 }
1075
1076 mutex_lock(&ar->mutex);
1077 if (cmd == SET_KEY) {
1078 if (!IS_STARTED(ar)) {
1079 err = -EOPNOTSUPP;
1080 goto out;
1081 }
1082
1083 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1084 sta = NULL;
1085
1086 i = 64 + key->keyidx;
1087 } else {
1088 for (i = 0; i < 64; i++)
1089 if (!(ar->usedkeys & BIT(i)))
1090 break;
1091 if (i == 64)
1092 goto err_softw;
1093 }
1094
1095 key->hw_key_idx = i;
1096
1097 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1098 ktype, 0, key->key,
1099 min_t(u8, 16, key->keylen));
1100 if (err)
1101 goto out;
1102
1103 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1104 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1105 NULL, ktype, 1,
1106 key->key + 16, 16);
1107 if (err)
1108 goto out;
1109
1110 /*
1111 			 * The hardware is not capable of generating the MMIC
1112 			 * for fragmented frames!
1113 */
1114 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1115 }
1116
1117 if (i < 64)
1118 ar->usedkeys |= BIT(i);
1119
1120 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1121 } else {
1122 if (!IS_STARTED(ar)) {
1123 /* The device is gone... together with the key ;-) */
1124 err = 0;
1125 goto out;
1126 }
1127
1128 if (key->hw_key_idx < 64) {
1129 ar->usedkeys &= ~BIT(key->hw_key_idx);
1130 } else {
1131 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1132 AR9170_ENC_ALG_NONE, 0,
1133 NULL, 0);
1134 if (err)
1135 goto out;
1136
1137 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1138 err = carl9170_upload_key(ar, key->hw_key_idx,
1139 NULL,
1140 AR9170_ENC_ALG_NONE,
1141 1, NULL, 0);
1142 if (err)
1143 goto out;
1144 }
1145
1146 }
1147
1148 err = carl9170_disable_key(ar, key->hw_key_idx);
1149 if (err)
1150 goto out;
1151 }
1152
1153 out:
1154 mutex_unlock(&ar->mutex);
1155 return err;
1156
1157 err_softw:
1158 if (!ar->rx_software_decryption) {
1159 ar->rx_software_decryption = true;
1160 carl9170_set_operating_mode(ar);
1161 }
1162 mutex_unlock(&ar->mutex);
1163 return -ENOSPC;
1164 }
1165
1166 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1167 struct ieee80211_vif *vif,
1168 struct ieee80211_sta *sta)
1169 {
1170 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1171 unsigned int i;
1172
1173 if (sta->ht_cap.ht_supported) {
1174 if (sta->ht_cap.ampdu_density > 6) {
1175 /*
1176 			 * The HW does not support a 16 us A-MPDU density
1177 			 * (ampdu_density > 6); no HT-Xmit for this station.
1178 */
1179
1180 return 0;
1181 }
1182
1183 for (i = 0; i < CARL9170_NUM_TID; i++)
1184 rcu_assign_pointer(sta_info->agg[i], NULL);
1185
1186 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1187 sta_info->ht_sta = true;
1188 }
1189
1190 return 0;
1191 }
1192
1193 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1194 struct ieee80211_vif *vif,
1195 struct ieee80211_sta *sta)
1196 {
1197 struct ar9170 *ar = hw->priv;
1198 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1199 unsigned int i;
1200 bool cleanup = false;
1201
1202 if (sta->ht_cap.ht_supported) {
1203
1204 sta_info->ht_sta = false;
1205
1206 rcu_read_lock();
1207 for (i = 0; i < CARL9170_NUM_TID; i++) {
1208 struct carl9170_sta_tid *tid_info;
1209
1210 tid_info = rcu_dereference(sta_info->agg[i]);
1211 rcu_assign_pointer(sta_info->agg[i], NULL);
1212
1213 if (!tid_info)
1214 continue;
1215
1216 spin_lock_bh(&ar->tx_ampdu_list_lock);
1217 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1218 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1219 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1220 cleanup = true;
1221 }
1222 rcu_read_unlock();
1223
1224 if (cleanup)
1225 carl9170_ampdu_gc(ar);
1226 }
1227
1228 return 0;
1229 }
1230
1231 static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1232 const struct ieee80211_tx_queue_params *param)
1233 {
1234 struct ar9170 *ar = hw->priv;
1235 int ret;
1236
1237 mutex_lock(&ar->mutex);
1238 if (queue < ar->hw->queues) {
1239 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1240 ret = carl9170_set_qos(ar);
1241 } else {
1242 ret = -EINVAL;
1243 }
1244
1245 mutex_unlock(&ar->mutex);
1246 return ret;
1247 }
1248
1249 static void carl9170_ampdu_work(struct work_struct *work)
1250 {
1251 struct ar9170 *ar = container_of(work, struct ar9170,
1252 ampdu_work);
1253
1254 if (!IS_STARTED(ar))
1255 return;
1256
1257 mutex_lock(&ar->mutex);
1258 carl9170_ampdu_gc(ar);
1259 mutex_unlock(&ar->mutex);
1260 }
1261
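/*
 * TX aggregation life cycle, as implemented below: TX_START allocates a
 * carl9170_sta_tid in the PROGRESS state and links it into tx_ampdu_list;
 * TX_OPERATIONAL clears the block-ack window bitmap and moves it to IDLE;
 * TX_STOP marks it SHUTDOWN and defers the actual teardown to ampdu_work,
 * which calls carl9170_ampdu_gc(). RX aggregation is handled entirely by
 * the hardware.
 */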
1262 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1263 struct ieee80211_vif *vif,
1264 enum ieee80211_ampdu_mlme_action action,
1265 struct ieee80211_sta *sta,
1266 u16 tid, u16 *ssn)
1267 {
1268 struct ar9170 *ar = hw->priv;
1269 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1270 struct carl9170_sta_tid *tid_info;
1271
1272 if (modparam_noht)
1273 return -EOPNOTSUPP;
1274
1275 switch (action) {
1276 case IEEE80211_AMPDU_TX_START:
1277 if (!sta_info->ht_sta)
1278 return -EOPNOTSUPP;
1279
1280 rcu_read_lock();
1281 if (rcu_dereference(sta_info->agg[tid])) {
1282 rcu_read_unlock();
1283 return -EBUSY;
1284 }
1285
1286 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1287 GFP_ATOMIC);
1288 if (!tid_info) {
1289 rcu_read_unlock();
1290 return -ENOMEM;
1291 }
1292
1293 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1294 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1295 tid_info->tid = tid;
1296 tid_info->max = sta_info->ampdu_max_len;
1297
1298 INIT_LIST_HEAD(&tid_info->list);
1299 INIT_LIST_HEAD(&tid_info->tmp_list);
1300 skb_queue_head_init(&tid_info->queue);
1301 spin_lock_init(&tid_info->lock);
1302
1303 spin_lock_bh(&ar->tx_ampdu_list_lock);
1304 ar->tx_ampdu_list_len++;
1305 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1306 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1307 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1308 rcu_read_unlock();
1309
1310 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1311 break;
1312
1313 case IEEE80211_AMPDU_TX_STOP:
1314 rcu_read_lock();
1315 tid_info = rcu_dereference(sta_info->agg[tid]);
1316 if (tid_info) {
1317 spin_lock_bh(&ar->tx_ampdu_list_lock);
1318 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1319 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1320 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1321 }
1322
1323 rcu_assign_pointer(sta_info->agg[tid], NULL);
1324 rcu_read_unlock();
1325
1326 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1327 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1328 break;
1329
1330 case IEEE80211_AMPDU_TX_OPERATIONAL:
1331 rcu_read_lock();
1332 tid_info = rcu_dereference(sta_info->agg[tid]);
1333
1334 sta_info->stats[tid].clear = true;
1335
1336 if (tid_info) {
1337 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1338 tid_info->state = CARL9170_TID_STATE_IDLE;
1339 }
1340 rcu_read_unlock();
1341
1342 if (WARN_ON_ONCE(!tid_info))
1343 return -EFAULT;
1344
1345 break;
1346
1347 case IEEE80211_AMPDU_RX_START:
1348 case IEEE80211_AMPDU_RX_STOP:
1349 /* Handled by hardware */
1350 break;
1351
1352 default:
1353 return -EOPNOTSUPP;
1354 }
1355
1356 return 0;
1357 }
1358
1359 #ifdef CONFIG_CARL9170_WPC
1360 static int carl9170_register_wps_button(struct ar9170 *ar)
1361 {
1362 struct input_dev *input;
1363 int err;
1364
1365 if (!(ar->features & CARL9170_WPS_BUTTON))
1366 return 0;
1367
1368 input = input_allocate_device();
1369 if (!input)
1370 return -ENOMEM;
1371
1372 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1373 wiphy_name(ar->hw->wiphy));
1374
1375 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1376 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1377
1378 input->name = ar->wps.name;
1379 input->phys = ar->wps.phys;
1380 input->id.bustype = BUS_USB;
1381 input->dev.parent = &ar->hw->wiphy->dev;
1382
1383 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1384
1385 err = input_register_device(input);
1386 if (err) {
1387 input_free_device(input);
1388 return err;
1389 }
1390
1391 ar->wps.pbc = input;
1392 return 0;
1393 }
1394 #endif /* CONFIG_CARL9170_WPC */
1395
1396 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1397 struct survey_info *survey)
1398 {
1399 struct ar9170 *ar = hw->priv;
1400 int err;
1401
1402 if (idx != 0)
1403 return -ENOENT;
1404
1405 mutex_lock(&ar->mutex);
1406 err = carl9170_get_noisefloor(ar);
1407 mutex_unlock(&ar->mutex);
1408 if (err)
1409 return err;
1410
1411 survey->channel = ar->channel;
1412 survey->filled = SURVEY_INFO_NOISE_DBM;
1413 survey->noise = ar->noise[0];
1414 return 0;
1415 }
1416
1417 static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1418 {
1419 struct ar9170 *ar = hw->priv;
1420 unsigned int vid;
1421
1422 mutex_lock(&ar->mutex);
1423 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1424 carl9170_flush_cab(ar, vid);
1425
1426 carl9170_flush(ar, drop);
1427 mutex_unlock(&ar->mutex);
1428 }
1429
1430 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1431 struct ieee80211_low_level_stats *stats)
1432 {
1433 struct ar9170 *ar = hw->priv;
1434
1435 memset(stats, 0, sizeof(*stats));
1436 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1437 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1438 return 0;
1439 }
1440
1441 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1442 struct ieee80211_vif *vif,
1443 enum sta_notify_cmd cmd,
1444 struct ieee80211_sta *sta)
1445 {
1446 struct ar9170 *ar = hw->priv;
1447 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1448 struct sk_buff *skb, *tmp;
1449 struct sk_buff_head free;
1450 int i;
1451
1452 switch (cmd) {
1453 case STA_NOTIFY_SLEEP:
1454 /*
1455 * Since the peer is no longer listening, we have to return
1456 * as many SKBs as possible back to the mac80211 stack.
1457 * It will deal with the retry procedure, once the peer
1458 * has become available again.
1459 *
1460 		 * NB: Ideally, the driver should return all frames in
1461 * the correct, ascending order. However, I think that this
1462 * functionality should be implemented in the stack and not
1463 * here...
1464 */
1465
1466 __skb_queue_head_init(&free);
1467
1468 if (sta->ht_cap.ht_supported) {
1469 rcu_read_lock();
1470 for (i = 0; i < CARL9170_NUM_TID; i++) {
1471 struct carl9170_sta_tid *tid_info;
1472
1473 tid_info = rcu_dereference(sta_info->agg[i]);
1474
1475 if (!tid_info)
1476 continue;
1477
1478 spin_lock_bh(&ar->tx_ampdu_list_lock);
1479 if (tid_info->state >
1480 CARL9170_TID_STATE_SUSPEND)
1481 tid_info->state =
1482 CARL9170_TID_STATE_SUSPEND;
1483 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1484
1485 spin_lock_bh(&tid_info->lock);
1486 while ((skb = __skb_dequeue(&tid_info->queue)))
1487 __skb_queue_tail(&free, skb);
1488 spin_unlock_bh(&tid_info->lock);
1489 }
1490 rcu_read_unlock();
1491 }
1492
1493 for (i = 0; i < ar->hw->queues; i++) {
1494 spin_lock_bh(&ar->tx_pending[i].lock);
1495 skb_queue_walk_safe(&ar->tx_pending[i], skb, tmp) {
1496 struct _carl9170_tx_superframe *super;
1497 struct ieee80211_hdr *hdr;
1498 struct ieee80211_tx_info *info;
1499
1500 super = (void *) skb->data;
1501 hdr = (void *) super->frame_data;
1502
1503 if (compare_ether_addr(hdr->addr1, sta->addr))
1504 continue;
1505
1506 __skb_unlink(skb, &ar->tx_pending[i]);
1507
1508 info = IEEE80211_SKB_CB(skb);
1509 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1510 atomic_dec(&ar->tx_ampdu_upload);
1511
1512 carl9170_tx_status(ar, skb, false);
1513 }
1514 spin_unlock_bh(&ar->tx_pending[i].lock);
1515 }
1516
1517 while ((skb = __skb_dequeue(&free)))
1518 carl9170_tx_status(ar, skb, false);
1519
1520 break;
1521
1522 case STA_NOTIFY_AWAKE:
1523 if (!sta->ht_cap.ht_supported)
1524 return;
1525
1526 rcu_read_lock();
1527 for (i = 0; i < CARL9170_NUM_TID; i++) {
1528 struct carl9170_sta_tid *tid_info;
1529
1530 tid_info = rcu_dereference(sta_info->agg[i]);
1531
1532 if (!tid_info)
1533 continue;
1534
1535 if ((tid_info->state == CARL9170_TID_STATE_SUSPEND))
1536 tid_info->state = CARL9170_TID_STATE_IDLE;
1537 }
1538 rcu_read_unlock();
1539 break;
1540 }
1541 }
1542
1543 static const struct ieee80211_ops carl9170_ops = {
1544 .start = carl9170_op_start,
1545 .stop = carl9170_op_stop,
1546 .tx = carl9170_op_tx,
1547 .flush = carl9170_op_flush,
1548 .add_interface = carl9170_op_add_interface,
1549 .remove_interface = carl9170_op_remove_interface,
1550 .config = carl9170_op_config,
1551 .prepare_multicast = carl9170_op_prepare_multicast,
1552 .configure_filter = carl9170_op_configure_filter,
1553 .conf_tx = carl9170_op_conf_tx,
1554 .bss_info_changed = carl9170_op_bss_info_changed,
1555 .get_tsf = carl9170_op_get_tsf,
1556 .set_key = carl9170_op_set_key,
1557 .sta_add = carl9170_op_sta_add,
1558 .sta_remove = carl9170_op_sta_remove,
1559 .sta_notify = carl9170_op_sta_notify,
1560 .get_survey = carl9170_op_get_survey,
1561 .get_stats = carl9170_op_get_stats,
1562 .ampdu_action = carl9170_op_ampdu_action,
1563 };
1564
1565 void *carl9170_alloc(size_t priv_size)
1566 {
1567 struct ieee80211_hw *hw;
1568 struct ar9170 *ar;
1569 struct sk_buff *skb;
1570 int i;
1571
1572 /*
1573 * this buffer is used for rx stream reconstruction.
1574 * Under heavy load this device (or the transport layer?)
1575 * tends to split the streams into separate rx descriptors.
1576 */
1577
1578 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1579 if (!skb)
1580 goto err_nomem;
1581
1582 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1583 if (!hw)
1584 goto err_nomem;
1585
1586 ar = hw->priv;
1587 ar->hw = hw;
1588 ar->rx_failover = skb;
1589
1590 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1591 ar->rx_has_plcp = false;
1592
1593 /*
1594 * Here's a hidden pitfall!
1595 *
1596 * All 4 AC queues work perfectly well under _legacy_ operation.
1597 	 * However, as soon as aggregation is enabled, the traffic flow
1598 * gets very bumpy. Therefore we have to _switch_ to a
1599 * software AC with a single HW queue.
1600 */
1601 hw->queues = __AR9170_NUM_TXQ;
1602
1603 mutex_init(&ar->mutex);
1604 spin_lock_init(&ar->beacon_lock);
1605 spin_lock_init(&ar->cmd_lock);
1606 spin_lock_init(&ar->tx_stats_lock);
1607 spin_lock_init(&ar->tx_ampdu_list_lock);
1608 spin_lock_init(&ar->mem_lock);
1609 spin_lock_init(&ar->state_lock);
1610 atomic_set(&ar->pending_restarts, 0);
1611 ar->vifs = 0;
1612 for (i = 0; i < ar->hw->queues; i++) {
1613 skb_queue_head_init(&ar->tx_status[i]);
1614 skb_queue_head_init(&ar->tx_pending[i]);
1615 }
1616 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1617 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1618 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1619 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1620 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1621 rcu_assign_pointer(ar->tx_ampdu_iter,
1622 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1623
1624 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1625 INIT_LIST_HEAD(&ar->vif_list);
1626 init_completion(&ar->tx_flush);
1627
1628 /*
1629 * Note:
1630 	 * IBSS/ADHOC and AP mode are only enabled if the firmware
1631 	 * supports these modes. The code which adds the additional
1632 	 * interface_modes is in fw.c.
1633 */
1634 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1635
1636 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1637 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1638 IEEE80211_HW_SUPPORTS_PS |
1639 IEEE80211_HW_PS_NULLFUNC_STACK |
1640 IEEE80211_HW_SIGNAL_DBM;
1641
1642 if (!modparam_noht) {
1643 /*
1644 		 * See the comment above for why we allow the user
1645 		 * to disable HT via a module parameter.
1646 */
1647 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1648 }
1649
1650 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1651 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1652 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1653
1654 hw->max_rates = CARL9170_TX_MAX_RATES;
1655 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1656
1657 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1658 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1659
1660 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1661 return ar;
1662
1663 err_nomem:
1664 kfree_skb(skb);
1665 return ERR_PTR(-ENOMEM);
1666 }
1667
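/*
 * The EEPROM image is fetched RB bytes (RW consecutive 32-bit words) at a
 * time: each CARL9170_CMD_RREG request carries RW register offsets starting
 * at AR9170_EEPROM_START and returns the matching RW words, which are
 * copied straight into ar->eeprom.
 */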
1668 static int carl9170_read_eeprom(struct ar9170 *ar)
1669 {
1670 #define RW 8 /* number of words to read at once */
1671 #define RB (sizeof(u32) * RW)
1672 u8 *eeprom = (void *)&ar->eeprom;
1673 __le32 offsets[RW];
1674 int i, j, err;
1675
1676 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1677
1678 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1679 #ifndef __CHECKER__
1680 /* don't want to handle trailing remains */
1681 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1682 #endif
1683
1684 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
1685 for (j = 0; j < RW; j++)
1686 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1687 RB * i + 4 * j);
1688
1689 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1690 RB, (u8 *) &offsets,
1691 RB, eeprom + RB * i);
1692 if (err)
1693 return err;
1694 }
1695
1696 #undef RW
1697 #undef RB
1698 return 0;
1699 }
1700
1701 static int carl9170_parse_eeprom(struct ar9170 *ar)
1702 {
1703 struct ath_regulatory *regulatory = &ar->common.regulatory;
1704 unsigned int rx_streams, tx_streams, tx_params = 0;
1705 int bands = 0;
1706
1707 if (ar->eeprom.length == cpu_to_le16(0xffff))
1708 return -ENODATA;
1709
1710 rx_streams = hweight8(ar->eeprom.rx_mask);
1711 tx_streams = hweight8(ar->eeprom.tx_mask);
1712
1713 if (rx_streams != tx_streams) {
1714 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1715
1716 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1717 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1718
1719 		tx_params |= (tx_streams - 1) <<
1720 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1721
1722 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1723 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1724 }
1725
1726 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1727 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1728 &carl9170_band_2GHz;
1729 bands++;
1730 }
1731 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1732 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1733 &carl9170_band_5GHz;
1734 bands++;
1735 }
1736
1737 /*
1738 	 * I measured this: a bandswitch takes roughly
1739 	 * 135 ms and a frequency switch about 80 ms.
1740 *
1741 * FIXME: measure these values again once EEPROM settings
1742 * are used, that will influence them!
1743 */
1744 if (bands == 2)
1745 ar->hw->channel_change_time = 135 * 1000;
1746 else
1747 ar->hw->channel_change_time = 80 * 1000;
1748
1749 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1750 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
1751
1752 /* second part of wiphy init */
1753 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1754
1755 return bands ? 0 : -EINVAL;
1756 }
1757
1758 static int carl9170_reg_notifier(struct wiphy *wiphy,
1759 struct regulatory_request *request)
1760 {
1761 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1762 struct ar9170 *ar = hw->priv;
1763
1764 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1765 }
1766
1767 int carl9170_register(struct ar9170 *ar)
1768 {
1769 struct ath_regulatory *regulatory = &ar->common.regulatory;
1770 int err = 0, i;
1771
1772 if (WARN_ON(ar->mem_bitmap))
1773 return -EINVAL;
1774
1775 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1776 sizeof(unsigned long), GFP_KERNEL);
1777
1778 if (!ar->mem_bitmap)
1779 return -ENOMEM;
1780
1781 /* try to read EEPROM, init MAC addr */
1782 err = carl9170_read_eeprom(ar);
1783 if (err)
1784 return err;
1785
1786 err = carl9170_fw_fix_eeprom(ar);
1787 if (err)
1788 return err;
1789
1790 err = carl9170_parse_eeprom(ar);
1791 if (err)
1792 return err;
1793
1794 err = ath_regd_init(regulatory, ar->hw->wiphy,
1795 carl9170_reg_notifier);
1796 if (err)
1797 return err;
1798
1799 if (modparam_noht) {
1800 carl9170_band_2GHz.ht_cap.ht_supported = false;
1801 carl9170_band_5GHz.ht_cap.ht_supported = false;
1802 }
1803
1804 for (i = 0; i < ar->fw.vif_num; i++) {
1805 ar->vif_priv[i].id = i;
1806 ar->vif_priv[i].vif = NULL;
1807 }
1808
1809 err = ieee80211_register_hw(ar->hw);
1810 if (err)
1811 return err;
1812
1813 /* mac80211 interface is now registered */
1814 ar->registered = true;
1815
1816 if (!ath_is_world_regd(regulatory))
1817 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1818
1819 #ifdef CONFIG_CARL9170_DEBUGFS
1820 carl9170_debugfs_register(ar);
1821 #endif /* CONFIG_CARL9170_DEBUGFS */
1822
1823 err = carl9170_led_init(ar);
1824 if (err)
1825 goto err_unreg;
1826
1827 #ifdef CONFIG_CARL9170_LEDS
1828 err = carl9170_led_register(ar);
1829 if (err)
1830 goto err_unreg;
1831 #endif /* CONFIG_CARL9170_LEDS */
1832
1833 #ifdef CONFIG_CARL9170_WPC
1834 err = carl9170_register_wps_button(ar);
1835 if (err)
1836 goto err_unreg;
1837 #endif /* CONFIG_CARL9170_WPC */
1838
1839 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
1840 wiphy_name(ar->hw->wiphy));
1841
1842 return 0;
1843
1844 err_unreg:
1845 carl9170_unregister(ar);
1846 return err;
1847 }
1848
1849 void carl9170_unregister(struct ar9170 *ar)
1850 {
1851 if (!ar->registered)
1852 return;
1853
1854 ar->registered = false;
1855
1856 #ifdef CONFIG_CARL9170_LEDS
1857 carl9170_led_unregister(ar);
1858 #endif /* CONFIG_CARL9170_LEDS */
1859
1860 #ifdef CONFIG_CARL9170_DEBUGFS
1861 carl9170_debugfs_unregister(ar);
1862 #endif /* CONFIG_CARL9170_DEBUGFS */
1863
1864 #ifdef CONFIG_CARL9170_WPC
1865 if (ar->wps.pbc) {
1866 input_unregister_device(ar->wps.pbc);
1867 ar->wps.pbc = NULL;
1868 }
1869 #endif /* CONFIG_CARL9170_WPC */
1870
1871 carl9170_cancel_worker(ar);
1872 cancel_work_sync(&ar->restart_work);
1873
1874 ieee80211_unregister_hw(ar->hw);
1875 }
1876
1877 void carl9170_free(struct ar9170 *ar)
1878 {
1879 WARN_ON(ar->registered);
1880 WARN_ON(IS_INITIALIZED(ar));
1881
1882 kfree_skb(ar->rx_failover);
1883 ar->rx_failover = NULL;
1884
1885 kfree(ar->mem_bitmap);
1886 ar->mem_bitmap = NULL;
1887
1888 mutex_destroy(&ar->mutex);
1889
1890 ieee80211_free_hw(ar->hw);
1891 }