drivers/net/wireless/ath/ath9k/virtual.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

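/*
 * Iterator state used to collect the MAC addresses of all active interfaces
 * across the primary and secondary wiphys, so a common BSSID mask can be
 * derived from them.
 */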
struct ath9k_vif_iter_data {
	int count;
	u8 *addr;
};

static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath9k_vif_iter_data *iter_data = data;
	u8 *nbuf;

	nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN,
			GFP_ATOMIC);
	if (nbuf == NULL)
		return;

	memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN);
	iter_data->addr = nbuf;
	iter_data->count++;
}

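/*
 * Derive and program the BSSID mask: OR together the XOR of every pair of
 * active MAC addresses, invert the result and hand it to the hardware, so
 * that frames addressed to any of the active interfaces pass the hardware
 * address filter.
 */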
void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_vif_iter_data iter_data;
	int i, j;
	u8 mask[ETH_ALEN];

	/*
	 * Add primary MAC address even if it is not in active use since it
	 * will be configured to the hardware as the starting point and the
	 * BSSID mask will need to be changed if another address is active.
	 */
	iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
	if (iter_data.addr) {
		memcpy(iter_data.addr, common->macaddr, ETH_ALEN);
		iter_data.count = 1;
	} else
		iter_data.count = 0;

	/* Get list of all active MAC addresses */
	spin_lock_bh(&sc->wiphy_lock);
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
						   &iter_data);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			continue;
		ieee80211_iterate_active_interfaces_atomic(
			sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
	}
	spin_unlock_bh(&sc->wiphy_lock);

	/* Generate an address mask to cover all active addresses */
	memset(mask, 0, ETH_ALEN);
	for (i = 0; i < iter_data.count; i++) {
		u8 *a1 = iter_data.addr + i * ETH_ALEN;
		for (j = i + 1; j < iter_data.count; j++) {
			u8 *a2 = iter_data.addr + j * ETH_ALEN;
			mask[0] |= a1[0] ^ a2[0];
			mask[1] |= a1[1] ^ a2[1];
			mask[2] |= a1[2] ^ a2[2];
			mask[3] |= a1[3] ^ a2[3];
			mask[4] |= a1[4] ^ a2[4];
			mask[5] |= a1[5] ^ a2[5];
		}
	}

	kfree(iter_data.addr);

	/* Invert the mask and configure hardware */
	common->bssidmask[0] = ~mask[0];
	common->bssidmask[1] = ~mask[1];
	common->bssidmask[2] = ~mask[2];
	common->bssidmask[3] = ~mask[3];
	common->bssidmask[4] = ~mask[4];
	common->bssidmask[5] = ~mask[5];

	ath_hw_setbssidmask(common);
}

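/*
 * Allocate and register a new secondary virtual wiphy. The new wiphy gets a
 * locally administered MAC address derived from the primary address by XORing
 * its slot index into the low address bytes.
 */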
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, common->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally managed address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}

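/*
 * Unregister and free a previously added secondary virtual wiphy.
 */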
int ath9k_wiphy_del(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (aphy == sc->sec_wiphy[i]) {
			sc->sec_wiphy[i] = NULL;
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_unregister_hw(aphy->hw);
			ieee80211_free_hw(aphy->hw);
			return 0;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return -ENOENT;
}

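/*
 * Build and transmit a nullfunc data frame to the AP of the given vif. When
 * ps is set, the PM bit is included to announce that the station is entering
 * power save before its wiphy is paused; the internal frame type lets the TX
 * completion path distinguish pause from unpause announcements.
 */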
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
			       struct ieee80211_vif *vif, const u8 *bssid,
			       int ps)
{
	struct ath_softc *sc = aphy->sc;
	struct ath_tx_control txctl;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_tx_info *info;

	skb = dev_alloc_skb(24);
	if (skb == NULL)
		return -ENOMEM;
	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
			 IEEE80211_FCTL_TODS);
	if (ps)
		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
	hdr->frame_control = fc;
	memcpy(hdr->addr1, bssid, ETH_ALEN);
	memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, bssid, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
	info->control.vif = vif;
	info->control.rates[0].idx = 0;
	info->control.rates[0].count = 4;
	info->control.rates[1].idx = -1;

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
	txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;

	if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
		goto exit;

	return 0;
exit:
	dev_kfree_skb_any(skb);
	return -1;
}

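/*
 * Helpers to check whether any wiphy (primary or secondary) is currently in
 * the PAUSING or SCAN state. The double-underscore variants expect the caller
 * to hold wiphy_lock; the others take the lock themselves.
 */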
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
			return true;
	}
	return false;
}

static bool ath9k_wiphy_pausing(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_pausing(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
			return true;
	}
	return false;
}

bool ath9k_wiphy_scanning(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_scanning(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
{
	if (aphy == NULL)
		return;
	if (aphy->chan_idx != aphy->sc->chan_idx)
		return; /* wiphy not on the selected channel */
	__ath9k_wiphy_unpause(aphy);
}

static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	__ath9k_wiphy_unpause_ch(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++)
		__ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
	spin_unlock_bh(&sc->wiphy_lock);
}

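/*
 * Work handler that performs the actual channel change once all affected
 * wiphys have been paused, then unpauses every wiphy operating on the newly
 * selected channel.
 */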
void ath9k_wiphy_chan_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
	struct ath_wiphy *aphy = sc->next_wiphy;

	if (aphy == NULL)
		return;

	/*
	 * All pending interfaces paused; ready to change
	 * channels.
	 */

	/* Change channels */
	mutex_lock(&sc->mutex);
	/* XXX: remove me eventually */
	ath9k_update_ichannel(sc, aphy->hw,
			      &sc->sc_ah->channels[sc->chan_idx]);
	ath_update_chainmask(sc, sc->chan_is_ht);
	if (ath_set_channel(sc, aphy->hw,
			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
		printk(KERN_DEBUG "ath9k: Failed to set channel for new "
		       "virtual wiphy\n");
		mutex_unlock(&sc->mutex);
		return;
	}
	mutex_unlock(&sc->mutex);

	ath9k_wiphy_unpause_channel(sc);
}

/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	if (tx_info_priv && tx_info_priv->frame_type == ATH9K_INT_PAUSE &&
	    aphy->state == ATH_WIPHY_PAUSING) {
		if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
			       "frame\n", wiphy_name(hw->wiphy));
			/*
			 * The AP did not reply; ignore this to allow us to
			 * continue.
			 */
		}
		aphy->state = ATH_WIPHY_PAUSED;
		if (!ath9k_wiphy_pausing(aphy->sc)) {
			/*
			 * Drop from tasklet to work to allow mutex for channel
			 * change.
			 */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
		}
	}

	kfree(tx_info_priv);
	tx_info->rate_driver_data[0] = NULL;

	dev_kfree_skb(skb);
}

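/*
 * Mark a wiphy as paused and kick the channel-change work if it was the last
 * one holding up the pending switch.
 */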
static void ath9k_mark_paused(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	aphy->state = ATH_WIPHY_PAUSED;
	if (!__ath9k_wiphy_pausing(sc))
		ieee80211_queue_work(sc->hw, &sc->chan_work);
}

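/*
 * Per-vif pause handler: an associated station interface announces power save
 * to its AP with a nullfunc frame and is marked paused from the TX completion
 * path (or immediately if the frame cannot be sent); AP interfaces are paused
 * right away since beacon transmission stops with the wiphy state change.
 */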
static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc) {
			ath9k_mark_paused(aphy);
			break;
		}
		/* TODO: could avoid this if already in PS mode */
		if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
			printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
			       __func__);
			ath9k_mark_paused(aphy);
		}
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is paused by aphy->state change */
		ath9k_mark_paused(aphy);
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}

int ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_pause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc)
			break;
		ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is re-enabled by aphy->state change */
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	ieee80211_iterate_active_interfaces_atomic(aphy->hw,
						   ath9k_unpause_iter, aphy);
	aphy->state = ATH_WIPHY_ACTIVE;
	ieee80211_wake_queues(aphy->hw);
	return 0;
}

int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_unpause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
			sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
	}
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		__ath9k_wiphy_pause(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			__ath9k_wiphy_pause(sc->sec_wiphy[i]);
	}
}

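/*
 * Request that the shared radio switch to the channel of the given virtual
 * wiphy: all active wiphys are paused, the target channel is recorded, and
 * the actual change is performed from ath9k_wiphy_chan_work() once everything
 * has been paused. Returns -EBUSY while a scan or a previous switch is still
 * in progress.
 */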
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
		{
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc);
			ath_radio_enable(sc);
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}

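/* Return true if any wiphy (primary or secondary) has been started. */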
bool ath9k_wiphy_started(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
		spin_unlock_bh(&sc->wiphy_lock);
		return true;
	}
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
			spin_unlock_bh(&sc->wiphy_lock);
			return true;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return false;
}

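/*
 * Forcibly pause active wiphys other than the selected one by marking them
 * PAUSED and stopping their queues (no nullfunc handshake). During a scan all
 * other wiphys are paused even if they currently share the selected channel;
 * otherwise wiphys already on that channel are left running.
 */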
static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
				   struct ath_wiphy *selected)
{
	if (selected->state == ATH_WIPHY_SCAN) {
		if (aphy == selected)
			return;
		/*
		 * Pause all other wiphys for the duration of the scan even if
		 * they are on the current channel now.
		 */
	} else if (aphy->chan_idx == selected->chan_idx)
		return;
	aphy->state = ATH_WIPHY_PAUSED;
	ieee80211_stop_queues(aphy->hw);
}

void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
				  struct ath_wiphy *selected)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}

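/*
 * Periodic wiphy scheduler: pick the next schedulable wiphy in round-robin
 * order over the secondary wiphys (falling back to the primary one), select
 * it if it is not already active or scanning, and re-arm the delayed work for
 * the next scheduling interval.
 */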
void ath9k_wiphy_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    wiphy_work.work);
	struct ath_wiphy *aphy = NULL;
	bool first = true;

	spin_lock_bh(&sc->wiphy_lock);

	if (sc->wiphy_scheduler_int == 0) {
		/* wiphy scheduler is disabled */
		spin_unlock_bh(&sc->wiphy_lock);
		return;
	}

try_again:
	sc->wiphy_scheduler_index++;
	while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
		aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
		if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
			break;

		sc->wiphy_scheduler_index++;
		aphy = NULL;
	}
	if (aphy == NULL) {
		sc->wiphy_scheduler_index = 0;
		if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
			if (first) {
				first = false;
				goto try_again;
			}
			/* No wiphy is ready to be scheduled */
		} else
			aphy = sc->pri_wiphy;
	}

	spin_unlock_bh(&sc->wiphy_lock);

	if (aphy &&
	    aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
	    ath9k_wiphy_select(aphy)) {
		printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
		       "change\n");
	}

	ieee80211_queue_delayed_work(sc->hw,
				     &sc->wiphy_work,
				     sc->wiphy_scheduler_int);
}

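/*
 * Set the wiphy scheduler interval in milliseconds; an interval of 0 disables
 * the scheduler.
 */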
void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
{
	cancel_delayed_work_sync(&sc->wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
	if (sc->wiphy_scheduler_int)
		ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
					     sc->wiphy_scheduler_int);
}

/* caller must hold wiphy_lock */
bool ath9k_all_wiphys_idle(struct ath_softc *sc)
{
	unsigned int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		return false;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (aphy->state != ATH_WIPHY_INACTIVE)
			return false;
	}
	return true;
}