return ret;
}
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
- int msdu_id;
+ int ret;
lockdep_assert_held(&htt->tx_lock);
- msdu_id = find_first_zero_bit(htt->used_msdu_ids,
- htt->max_num_pending_tx);
- if (msdu_id == htt->max_num_pending_tx)
- return -ENOBUFS;
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
- __set_bit(msdu_id, htt->used_msdu_ids);
- return msdu_id;
+ ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+ return ret;
}
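
For reference, a minimal sketch of the idr call this hunk now relies on (illustrative only, not part of the patch; example_ids and example_alloc are made-up names). idr_alloc() returns the newly allocated id on success and a negative errno on failure, e.g. -ENOSPC once the range is exhausted; the exclusive upper bound 0x10000 keeps ids within the 16-bit msdu_id space, so no explicit bitmap scan is needed anymore:

	#include <linux/idr.h>
	#include <linux/skbuff.h>

	static DEFINE_IDR(example_ids);

	static int example_alloc(struct sk_buff *skb)
	{
		/* Returns an id in [0, 0xffff] or a negative errno. */
		return idr_alloc(&example_ids, skb, 0, 0x10000, GFP_ATOMIC);
	}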
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
struct ath10k *ar = htt->ar;
lockdep_assert_held(&htt->tx_lock);
- if (!test_bit(msdu_id, htt->used_msdu_ids))
- ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
- msdu_id);
-
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
- __clear_bit(msdu_id, htt->used_msdu_ids);
+
+ idr_remove(&htt->pending_tx, msdu_id);
}
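
A hedged aside: the skb pointer stored by idr_alloc() can be fetched back by id before this removal; whether the completion path does exactly this is not shown in these hunks, and example_fetch is a made-up name:

	/* idr_remove() only releases the id for reuse; it does not free
	 * whatever the id pointed at, so look the skb up first. */
	static struct sk_buff *example_fetch(struct ath10k_htt *htt, u16 msdu_id)
	{
		return idr_find(&htt->pending_tx, msdu_id);
	}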
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- spin_lock_init(&htt->tx_lock);
-
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
- htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
- htt->max_num_pending_tx, GFP_KERNEL);
- if (!htt->pending_tx)
- return -ENOMEM;
-
- htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
- BITS_TO_LONGS(htt->max_num_pending_tx),
- GFP_KERNEL);
- if (!htt->used_msdu_ids) {
- kfree(htt->pending_tx);
- return -ENOMEM;
- }
+ spin_lock_init(&htt->tx_lock);
+ idr_init(&htt->pending_tx);
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
- kfree(htt->used_msdu_ids);
- kfree(htt->pending_tx);
+ idr_destroy(&htt->pending_tx);
return -ENOMEM;
}
return 0;
}
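
Worth noting why the error handling shrinks here: idr_init() merely initializes the structure and cannot fail (the tree grows on demand inside idr_alloc()), so the two kzalloc() failure paths collapse into the single dma_pool_create() check:

	void idr_init(struct idr *idr);	/* no allocation, no return value */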
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
- struct ath10k *ar = htt->ar;
+ struct ath10k *ar = ctx;
+ struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0};
- int msdu_id;
-
- spin_lock_bh(&htt->tx_lock);
- for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
- if (!test_bit(msdu_id, htt->used_msdu_ids))
- continue;
- ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
- msdu_id);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
- tx_done.discard = 1;
- tx_done.msdu_id = msdu_id;
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
- ath10k_txrx_tx_unref(htt, &tx_done);
- }
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock);
+
+ return 0;
}
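
The (int, void *, void *) parameter list above is dictated by idr_for_each(), which invokes the callback once per allocated id together with the stored pointer; a non-zero return stops the walk early, so returning 0 visits every pending frame:

	int idr_for_each(const struct idr *idr,
			 int (*fn)(int id, void *p, void *data),
			 void *data);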
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
- ath10k_htt_tx_free_pending(htt);
- kfree(htt->pending_tx);
- kfree(htt->used_msdu_ids);
+ idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+ idr_destroy(&htt->pending_tx);
dma_pool_destroy(htt->tx_pool);
}
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
- res = ath10k_htt_tx_alloc_msdu_id(htt);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
goto err;
spin_lock_bh(&htt->tx_lock);
- res = ath10k_htt_tx_alloc_msdu_id(htt);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec: