spinlock_t rxbuflock;
};
-struct ath9k_htc_tx {
- bool tx_queues_stop;
- spinlock_t tx_lock;
+#define ATH9K_HTC_TX_RESERVE 10
+#define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE)
+
+#define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0)
+
+struct ath9k_htc_tx {
+ u8 flags;
+ int queued_cnt;
struct sk_buff_head tx_queue;
+ spinlock_t tx_lock;
};
struct ath9k_htc_tx_ctl {
int get_hw_qnum(u16 queue, int *hwq_map);
int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
struct ath9k_tx_queue_info *qinfo);
+void ath9k_htc_check_stop_queues(struct ath9k_htc_priv *priv);
+void ath9k_htc_check_wake_queues(struct ath9k_htc_priv *priv);
int ath9k_rx_init(struct ath9k_htc_priv *priv);
void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
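
A note on the new constants (not part of the patch): queued_cnt counts frames that have been handed to the HTC layer but not yet reported complete, and the mac80211 queues are stopped once it reaches the TX buffer pool size (MAX_TX_BUF_NUM) minus a reserve of 10 buffers. A minimal user-space sketch of that stop/wake accounting follows; the pool size of 64 is an assumed stand-in for MAX_TX_BUF_NUM, and the tx_lock locking is omitted:

	#include <stdbool.h>
	#include <stdio.h>

	#define POOL_SIZE 64                    /* assumed stand-in for MAX_TX_BUF_NUM */
	#define RESERVE   10                    /* ATH9K_HTC_TX_RESERVE */
	#define THRESHOLD (POOL_SIZE - RESERVE) /* ATH9K_HTC_TX_THRESHOLD */

	static int queued_cnt;                  /* frames handed to the HTC layer */
	static bool stopped;                    /* ATH9K_HTC_OP_TX_QUEUES_STOP */

	static void check_stop(void)            /* cf. ath9k_htc_check_stop_queues() */
	{
		if (++queued_cnt >= THRESHOLD && !stopped) {
			stopped = true;
			printf("stop queues at %d queued frames\n", queued_cnt);
		}
	}

	static void check_wake(void)            /* cf. ath9k_htc_check_wake_queues() */
	{
		if (queued_cnt < THRESHOLD && stopped) {
			stopped = false;
			printf("wake queues at %d queued frames\n", queued_cnt);
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < THRESHOLD; i++) /* submissions pile up ...           */
			check_stop();           /* ... queues stop at the threshold  */

		queued_cnt--;                   /* one TX completion ...             */
		check_wake();                   /* ... brings the count back under   */
		return 0;                       /*     the threshold and wakes them  */
	}

Run as-is, this stops the queues at 54 queued frames and wakes them as soon as a single completion drops the count below the threshold; there is no separate low watermark.
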
ath_dbg(common, ATH_DBG_FATAL,
"Failed to send CAB frame\n");
dev_kfree_skb_any(skb);
+ } else {
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.queued_cnt++;
+ spin_unlock_bh(&priv->tx.tx_lock);
}
next:
skb = ieee80211_get_buffered_bc(priv->hw, vif);
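
Note that this buffered-CAB path bypasses ath9k_htc_tx() and bumps queued_cnt by hand rather than calling ath9k_htc_check_stop_queues(); presumably the ATH9K_HTC_TX_RESERVE headroom exists precisely so that such internally generated frames (beacons, CAB traffic) still find free buffers after the mac80211 data queues have been stopped.
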
/* Start TX */
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
- priv->tx.tx_queues_stop = false;
+ priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
spin_unlock_bh(&priv->tx.tx_lock);
ieee80211_wake_queues(hw);
{
struct ieee80211_hdr *hdr;
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
int padpos, padsize, ret;
hdr = (struct ieee80211_hdr *) skb->data;
padpos = ath9k_cmn_padpos(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
- if (skb_headroom(skb) < padsize)
+ if (skb_headroom(skb) < padsize) {
+ ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n");
goto fail_tx;
+ }
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
}
ret = ath9k_htc_tx_start(priv, skb, false);
if (ret != 0) {
- if (ret == -ENOMEM) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
- "Stopping TX queues\n");
- ieee80211_stop_queues(hw);
- spin_lock_bh(&priv->tx.tx_lock);
- priv->tx.tx_queues_stop = true;
- spin_unlock_bh(&priv->tx.tx_lock);
- } else {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
- "Tx failed\n");
- }
+ ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n");
goto fail_tx;
}
+ ath9k_htc_check_stop_queues(priv);
+
return;
fail_tx:
 	dev_kfree_skb_any(skb);
 }
+void ath9k_htc_check_stop_queues(struct ath9k_htc_priv *priv)
+{
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.queued_cnt++;
+ if ((priv->tx.queued_cnt >= ATH9K_HTC_TX_THRESHOLD) &&
+ !(priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) {
+ priv->tx.flags |= ATH9K_HTC_OP_TX_QUEUES_STOP;
+ ieee80211_stop_queues(priv->hw);
+ }
+ spin_unlock_bh(&priv->tx.tx_lock);
+}
+
+void ath9k_htc_check_wake_queues(struct ath9k_htc_priv *priv)
+{
+ spin_lock_bh(&priv->tx.tx_lock);
+ if ((priv->tx.queued_cnt < ATH9K_HTC_TX_THRESHOLD) &&
+ (priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) {
+ priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
+ ieee80211_wake_queues(priv->hw);
+ }
+ spin_unlock_bh(&priv->tx.tx_lock);
+}
+
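
A condensed view of how the two helpers pair up, assembled from the hunks in this patch rather than new code; both take tx.tx_lock internally, so callers must invoke them with the lock dropped:

	/* submit side: count the frame, maybe stop the mac80211 queues */
	ret = ath9k_htc_tx_start(priv, skb, false);
	if (ret == 0)
		ath9k_htc_check_stop_queues(priv);

	/* completion side: drop the count, maybe wake the queues */
	spin_lock_bh(&priv->tx.tx_lock);
	if (WARN_ON(--priv->tx.queued_cnt < 0))
		priv->tx.queued_cnt = 0;
	spin_unlock_bh(&priv->tx.tx_lock);
	ath9k_htc_check_wake_queues(priv);
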
int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
struct ath9k_tx_queue_info *qinfo)
{
rcu_read_unlock();
send_mac80211:
+ spin_lock_bh(&priv->tx.tx_lock);
+ if (WARN_ON(--priv->tx.queued_cnt < 0))
+ priv->tx.queued_cnt = 0;
+ spin_unlock_bh(&priv->tx.tx_lock);
+
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}
/* Wake TX queues if needed */
- spin_lock_bh(&priv->tx.tx_lock);
- if (priv->tx.tx_queues_stop) {
- priv->tx.tx_queues_stop = false;
- spin_unlock_bh(&priv->tx.tx_lock);
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
- "Waking up TX queues\n");
- ieee80211_wake_queues(priv->hw);
- return;
- }
- spin_unlock_bh(&priv->tx.tx_lock);
+ ath9k_htc_check_wake_queues(priv);
}
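
A note, not from the patch: with the wake logic centralized in ath9k_htc_check_wake_queues(), the completion path above loses the old open-coded unlock/early-return dance around tx_queues_stop. The counter decrement (under send_mac80211: above) and the wake check run in separate critical sections, so two completions may race to call the helper; that is harmless, since the flag is tested and cleared under tx_lock and only the first caller actually wakes the queues.
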
void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,