bool ath9k_all_wiphys_idle(struct ath_softc *sc);
void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
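+/* Stop/wake the given mac80211 queue across the primary and secondary wiphys */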
+void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
+void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
+
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
#endif /* ATH9K_H */
wiphy_name(aphy->hw->wiphy),
idle ? "idle" : "not-idle");
}
+/* Only bother starting a queue on an active virtual wiphy */
+void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
+{
+ struct ieee80211_hw *hw = sc->pri_wiphy->hw;
+ unsigned int i;
+
+ spin_lock_bh(&sc->wiphy_lock);
+
+ /* Wake the queue on the primary wiphy if it is active */
+ if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
+ ieee80211_wake_queue(hw, skb_queue);
+ goto unlock;
+ }
+
+ /* Otherwise look for an active secondary wiphy */
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (!aphy)
+ continue;
+ if (aphy->state != ATH_WIPHY_ACTIVE)
+ continue;
+
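+ /* Wake the queue on the first active secondary and stop looking */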
+ hw = aphy->hw;
+ ieee80211_wake_queue(hw, skb_queue);
+ break;
+ }
+
+unlock:
+ spin_unlock_bh(&sc->wiphy_lock);
+}
+
+/* Go ahead and propagate the queue stop to all virtual wiphys, it won't hurt */
+void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
+{
+ struct ieee80211_hw *hw = sc->pri_wiphy->hw;
+ unsigned int i;
+
+ spin_lock_bh(&sc->wiphy_lock);
+
+ /* Stop the queue on the primary wiphy */
+ ieee80211_stop_queue(hw, skb_queue);
+
+ /* Now stop the secondary wiphy queues */
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (!aphy)
+ continue;
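+ /* Stop this queue regardless of the secondary wiphy's state */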
+ hw = aphy->hw;
+ ieee80211_stop_queue(hw, skb_queue);
+ }
+ spin_unlock_bh(&sc->wiphy_lock);
+}
struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
struct ath_txq *txq = NULL;
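+ /* mac80211 queue for this frame, also used below when stopping the queue */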
+ u16 skb_queue = skb_get_queue_mapping(skb);
int qnum;
- qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+ qnum = ath_get_hal_qnum(skb_queue, sc);
txq = &sc->tx.txq[qnum];
spin_lock_bh(&txq->axq_lock);
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
"TX queue: %d is full, depth: %d\n",
qnum, txq->axq_depth);
- ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
+ ath_mac80211_stop_queue(sc, skb_queue);
txq->stopped = 1;
spin_unlock_bh(&txq->axq_lock);
return NULL;
* on the queue */
spin_lock_bh(&txq->axq_lock);
if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
- ieee80211_stop_queue(sc->hw,
- skb_get_queue_mapping(skb));
+ ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
txq->stopped = 1;
}
spin_unlock_bh(&txq->axq_lock);
sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
if (qnum != -1) {
- ieee80211_wake_queue(sc->hw, qnum);
+ ath_mac80211_start_queue(sc, qnum);
txq->stopped = 0;
}
}