drivers/net/wireless/ath/ath9k/virtual.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

struct ath9k_vif_iter_data {
	int count;
	u8 *addr;
};

static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath9k_vif_iter_data *iter_data = data;
	u8 *nbuf;

	nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN,
			GFP_ATOMIC);
	if (nbuf == NULL)
		return;

	memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN);
	iter_data->addr = nbuf;
	iter_data->count++;
}

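/*
 * Recompute the hardware BSSID mask so that it covers the primary MAC
 * address and the addresses of all active interfaces on every wiphy, and
 * program the inverted mask into the hardware.
 */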
void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath9k_vif_iter_data iter_data;
	int i, j;
	u8 mask[ETH_ALEN];

	/*
	 * Add primary MAC address even if it is not in active use since it
	 * will be configured to the hardware as the starting point and the
	 * BSSID mask will need to be changed if another address is active.
	 */
	iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
	if (iter_data.addr) {
		memcpy(iter_data.addr, sc->sc_ah->macaddr, ETH_ALEN);
		iter_data.count = 1;
	} else
		iter_data.count = 0;

	/* Get list of all active MAC addresses */
	spin_lock_bh(&sc->wiphy_lock);
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
						   &iter_data);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			continue;
		ieee80211_iterate_active_interfaces_atomic(
			sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
	}
	spin_unlock_bh(&sc->wiphy_lock);

	/* Generate an address mask to cover all active addresses */
	memset(mask, 0, ETH_ALEN);
	for (i = 0; i < iter_data.count; i++) {
		u8 *a1 = iter_data.addr + i * ETH_ALEN;
		for (j = i + 1; j < iter_data.count; j++) {
			u8 *a2 = iter_data.addr + j * ETH_ALEN;
			mask[0] |= a1[0] ^ a2[0];
			mask[1] |= a1[1] ^ a2[1];
			mask[2] |= a1[2] ^ a2[2];
			mask[3] |= a1[3] ^ a2[3];
			mask[4] |= a1[4] ^ a2[4];
			mask[5] |= a1[5] ^ a2[5];
		}
	}

	kfree(iter_data.addr);

	/* Invert the mask and configure hardware */
	sc->bssidmask[0] = ~mask[0];
	sc->bssidmask[1] = ~mask[1];
	sc->bssidmask[2] = ~mask[2];
	sc->bssidmask[3] = ~mask[3];
	sc->bssidmask[4] = ~mask[4];
	sc->bssidmask[5] = ~mask[5];

	ath9k_hw_setbssidmask(sc);
}

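/*
 * Register a new virtual wiphy: allocate a secondary ieee80211_hw, grow the
 * sec_wiphy array if no empty slot is available, and derive a locally
 * administered MAC address from the primary one by XORing in the slot index.
 */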
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, sc->sc_ah->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally managed address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}

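/*
 * Unregister and free a virtual wiphy. Returns -ENOENT if the wiphy is not
 * found in the sec_wiphy array.
 */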
int ath9k_wiphy_del(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (aphy == sc->sec_wiphy[i]) {
			sc->sec_wiphy[i] = NULL;
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_unregister_hw(aphy->hw);
			ieee80211_free_hw(aphy->hw);
			return 0;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return -ENOENT;
}

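/*
 * Build and transmit a nullfunc data frame to the given BSSID. When ps is
 * set the PM bit is set and the frame is tagged ATH9K_INT_PAUSE, otherwise
 * ATH9K_INT_UNPAUSE, so the internal TX status path can track pausing.
 */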
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
			       struct ieee80211_vif *vif, const u8 *bssid,
			       int ps)
{
	struct ath_softc *sc = aphy->sc;
	struct ath_tx_control txctl;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_tx_info *info;

	skb = dev_alloc_skb(24);
	if (skb == NULL)
		return -ENOMEM;
	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
			 IEEE80211_FCTL_TODS);
	if (ps)
		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
	hdr->frame_control = fc;
	memcpy(hdr->addr1, bssid, ETH_ALEN);
	memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, bssid, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
	info->control.vif = vif;
	info->control.rates[0].idx = 0;
	info->control.rates[0].count = 4;
	info->control.rates[1].idx = -1;

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
	txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;

	if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
		goto exit;

	return 0;
exit:
	dev_kfree_skb_any(skb);
	return -1;
}

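/*
 * Helpers that report whether any wiphy (primary or secondary) is currently
 * in the PAUSING or SCAN state. The __-prefixed variants require the caller
 * to hold wiphy_lock; the plain variants take the lock themselves.
 */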
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
			return true;
	}
	return false;
}

static bool ath9k_wiphy_pausing(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_pausing(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
			return true;
	}
	return false;
}

bool ath9k_wiphy_scanning(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_scanning(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
{
	if (aphy == NULL)
		return;
	if (aphy->chan_idx != aphy->sc->chan_idx)
		return; /* wiphy not on the selected channel */
	__ath9k_wiphy_unpause(aphy);
}

static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	__ath9k_wiphy_unpause_ch(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++)
		__ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
	spin_unlock_bh(&sc->wiphy_lock);
}

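/*
 * Worker that performs the actual channel switch once every affected wiphy
 * has been paused; it runs in process context so it can take sc->mutex, and
 * then unpauses the wiphys that belong to the new channel.
 */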
void ath9k_wiphy_chan_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
	struct ath_wiphy *aphy = sc->next_wiphy;

	if (aphy == NULL)
		return;

	/*
	 * All pending interfaces paused; ready to change
	 * channels.
	 */

	/* Change channels */
	mutex_lock(&sc->mutex);
	/* XXX: remove me eventually */
	ath9k_update_ichannel(sc, aphy->hw,
			      &sc->sc_ah->channels[sc->chan_idx]);
	ath_update_chainmask(sc, sc->chan_is_ht);
	if (ath_set_channel(sc, aphy->hw,
			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
		printk(KERN_DEBUG "ath9k: Failed to set channel for new "
		       "virtual wiphy\n");
		mutex_unlock(&sc->mutex);
		return;
	}
	mutex_unlock(&sc->mutex);

	ath9k_wiphy_unpause_channel(sc);
}

/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	if (tx_info_priv && tx_info_priv->frame_type == ATH9K_INT_PAUSE &&
	    aphy->state == ATH_WIPHY_PAUSING) {
		if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
			       "frame\n", wiphy_name(hw->wiphy));
			/*
			 * The AP did not reply; ignore this to allow us to
			 * continue.
			 */
		}
		aphy->state = ATH_WIPHY_PAUSED;
		if (!ath9k_wiphy_pausing(aphy->sc)) {
			/*
			 * Drop from tasklet to work to allow mutex for channel
			 * change.
			 */
			queue_work(aphy->sc->hw->workqueue,
				   &aphy->sc->chan_work);
		}
	}

	kfree(tx_info_priv);
	tx_info->rate_driver_data[0] = NULL;

	dev_kfree_skb(skb);
}

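/*
 * Per-interface pause handling: mark a wiphy paused (kicking the channel
 * change work if no other wiphy is still pausing), or ask the AP for power
 * save on an associated station interface via a PS nullfunc frame.
 */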
static void ath9k_mark_paused(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	aphy->state = ATH_WIPHY_PAUSED;
	if (!__ath9k_wiphy_pausing(sc))
		queue_work(sc->hw->workqueue, &sc->chan_work);
}

static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc) {
			ath9k_mark_paused(aphy);
			break;
		}
		/* TODO: could avoid this if already in PS mode */
		if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
			printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
			       __func__);
			ath9k_mark_paused(aphy);
		}
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is paused by aphy->state change */
		ath9k_mark_paused(aphy);
		break;
	default:
		break;
	}
}

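/*
 * Pause and unpause a single virtual wiphy: pausing stops its mac80211
 * queues and asks each associated station interface to enter power save;
 * unpausing reverses both steps.
 */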
/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}

int ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_pause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc)
			break;
		ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is re-enabled by aphy->state change */
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	ieee80211_iterate_active_interfaces_atomic(aphy->hw,
						   ath9k_unpause_iter, aphy);
	aphy->state = ATH_WIPHY_ACTIVE;
	ieee80211_wake_queues(aphy->hw);
	return 0;
}

int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_unpause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
			sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
	}
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		__ath9k_wiphy_pause(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			__ath9k_wiphy_pause(sc->sec_wiphy[i]);
	}
}

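/*
 * Request that the radio switch to the channel of the given wiphy. All other
 * active wiphys are paused first; the actual channel change happens from
 * ath9k_wiphy_chan_work() once every wiphy has reached the PAUSED state.
 */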
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2)) {
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc);
			ath_radio_enable(sc);
			queue_work(aphy->sc->hw->workqueue,
				   &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		queue_work(aphy->sc->hw->workqueue, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}

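/*
 * Return true if any wiphy (primary or secondary) has been started, i.e.
 * is in a state other than ATH_WIPHY_INACTIVE.
 */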
bool ath9k_wiphy_started(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
		spin_unlock_bh(&sc->wiphy_lock);
		return true;
	}
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
			spin_unlock_bh(&sc->wiphy_lock);
			return true;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return false;
}

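/*
 * Force-pause active wiphys on behalf of the selected wiphy: during a scan
 * every other wiphy is paused, otherwise only those that are not already on
 * the selected channel.
 */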
static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
				   struct ath_wiphy *selected)
{
	if (selected->state == ATH_WIPHY_SCAN) {
		if (aphy == selected)
			return;
		/*
		 * Pause all other wiphys for the duration of the scan even if
		 * they are on the current channel now.
		 */
	} else if (aphy->chan_idx == selected->chan_idx)
		return;
	aphy->state = ATH_WIPHY_PAUSED;
	ieee80211_stop_queues(aphy->hw);
}

void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
				  struct ath_wiphy *selected)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}

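/*
 * Round-robin wiphy scheduler: periodically pick the next active wiphy
 * (secondary slots first, then the primary), ask ath9k_wiphy_select() to
 * switch to its channel, and re-arm the delayed work.
 */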
void ath9k_wiphy_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    wiphy_work.work);
	struct ath_wiphy *aphy = NULL;
	bool first = true;

	spin_lock_bh(&sc->wiphy_lock);

	if (sc->wiphy_scheduler_int == 0) {
		/* wiphy scheduler is disabled */
		spin_unlock_bh(&sc->wiphy_lock);
		return;
	}

try_again:
	sc->wiphy_scheduler_index++;
	while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
		aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
		if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
			break;

		sc->wiphy_scheduler_index++;
		aphy = NULL;
	}
	if (aphy == NULL) {
		sc->wiphy_scheduler_index = 0;
		if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
			if (first) {
				first = false;
				goto try_again;
			}
			/* No wiphy is ready to be scheduled */
		} else
			aphy = sc->pri_wiphy;
	}

	spin_unlock_bh(&sc->wiphy_lock);

	if (aphy &&
	    aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
	    ath9k_wiphy_select(aphy)) {
		printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
		       "change\n");
	}

	queue_delayed_work(sc->hw->workqueue, &sc->wiphy_work,
			   sc->wiphy_scheduler_int);
}

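/*
 * Enable or disable the wiphy scheduler. A msec_int of 0 disables it;
 * otherwise the scheduler work is (re)armed with the given interval.
 */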
void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
{
	cancel_delayed_work_sync(&sc->wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
	if (sc->wiphy_scheduler_int)
		queue_delayed_work(sc->hw->workqueue, &sc->wiphy_work,
				   sc->wiphy_scheduler_int);
}

/* caller must hold wiphy_lock */
bool ath9k_all_wiphys_idle(struct ath_softc *sc)
{
	unsigned int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		return false;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (aphy->state != ATH_WIPHY_INACTIVE)
			return false;
	}
	return true;
}