return 0;
}
-static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
+/* Calculate Atheros packet type from IEEE80211 packet header */
+
+static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
+ struct ieee80211_hdr *hdr;
enum ath9k_pkt_type htype;
__le16 fc;
+ hdr = (struct ieee80211_hdr *)skb->data;
fc = hdr->frame_control;
- /* Calculate Atheros packet type from IEEE80211 packet header */
-
if (ieee80211_is_beacon(fc))
htype = ATH9K_PKT_TYPE_BEACON;
else if (ieee80211_is_probe_resp(fc))
return htype;
}
-static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
+static bool check_min_rate(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ath_tx_info_priv *tx_info_priv;
+ bool use_minrate = false;
__le16 fc;
hdr = (struct ieee80211_hdr *)skb->data;
fc = hdr->frame_control;
- /* XXX: HACK! */
- tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
-
if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
- txctl->use_minrate = 1;
- txctl->min_rate = tx_info_priv->min_rate;
+ use_minrate = true;
} else if (ieee80211_is_data(fc)) {
if (ieee80211_is_nullfunc(fc) ||
- /* Port Access Entity (IEEE 802.1X) */
- (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- txctl->use_minrate = 1;
- txctl->min_rate = tx_info_priv->min_rate;
+ /* Port Access Entity (IEEE 802.1X) */
+ (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ use_minrate = true;
}
- if (is_multicast_ether_addr(hdr->addr1))
- txctl->mcast_rate = tx_info_priv->min_rate;
}
+ return use_minrate;
}
-/* This function will setup additional txctl information, mostly rate stuff */
-/* FIXME: seqno, ps */
-static int ath_tx_prepare(struct ath_softc *sc,
- struct sk_buff *skb,
- struct ath_tx_control *txctl)
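+/* Map the mac80211 hw_key algorithm to an Atheros hardware key type */
+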
+static int get_hw_crypto_keytype(struct sk_buff *skb)
{
- struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_hdr *hdr;
- struct ath_rc_series *rcs;
- struct ath_txq *txq = NULL;
- const struct ath9k_rate_table *rt;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ath_tx_info_priv *tx_info_priv;
- int hdrlen;
- u8 rix, antenna;
- __le16 fc;
- u8 *qc;
-
- txctl->dev = sc;
- hdr = (struct ieee80211_hdr *)skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
-
- rt = sc->sc_currates;
- BUG_ON(!rt);
-
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- txctl->tidno = qc[0] & 0xf;
- }
-
- txctl->if_id = 0;
- txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
-
- /* Always try at highest power possible unless the the device
- * was configured by the user to use another power. */
- if (likely(sc->sc_config.txpowlimit == ATH_TXPOWER_MAX))
- txctl->txpower = ATH_TXPOWER_MAX;
- else
- txctl->txpower = sc->sc_config.txpowlimit;
-
- /* Fill Key related fields */
-
- txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
- txctl->keyix = ATH9K_TXKEYIX_INVALID;
if (tx_info->control.hw_key) {
- txctl->keyix = tx_info->control.hw_key->hw_key_idx;
- txctl->frmlen += tx_info->control.hw_key->icv_len;
-
if (tx_info->control.hw_key->alg == ALG_WEP)
- txctl->keytype = ATH9K_KEY_TYPE_WEP;
+ return ATH9K_KEY_TYPE_WEP;
else if (tx_info->control.hw_key->alg == ALG_TKIP)
- txctl->keytype = ATH9K_KEY_TYPE_TKIP;
+ return ATH9K_KEY_TYPE_TKIP;
else if (tx_info->control.hw_key->alg == ALG_CCMP)
- txctl->keytype = ATH9K_KEY_TYPE_AES;
+ return ATH9K_KEY_TYPE_AES;
}
- /* Fill packet type */
-
- txctl->atype = get_hal_packet_type(hdr);
-
- /* Fill qnum */
+ return ATH9K_KEY_TYPE_CLEAR;
+}
- if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
- txctl->qnum = 0;
- txq = sc->sc_cabq;
- } else {
- txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
- txq = &sc->sc_txq[txctl->qnum];
- }
- spin_lock_bh(&txq->axq_lock);
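+/* Set up the rate/retry series for this frame; management, control,
+ * NULL data and EAPOL frames are sent at the minimum rate */
+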
+static void setup_rate_retries(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_tx_info_priv *tx_info_priv;
+ struct ath_rc_series *rcs;
+ struct ieee80211_hdr *hdr;
+ const struct ath9k_rate_table *rt;
+ bool use_minrate;
+ __le16 fc;
+ u8 rix;
- /* Try to avoid running out of descriptors */
- if (txq->axq_depth >= (ATH_TXBUF - 20) &&
- !(txctl->flags & ATH9K_TXDESC_CAB)) {
- DPRINTF(sc, ATH_DBG_FATAL,
- "%s: TX queue: %d is full, depth: %d\n",
- __func__,
- txctl->qnum,
- txq->axq_depth);
- ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
- txq->stopped = 1;
- spin_unlock_bh(&txq->axq_lock);
- return -1;
- }
+ rt = sc->sc_currates;
+ BUG_ON(!rt);
- spin_unlock_bh(&txq->axq_lock);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif; /* HACK */
+ rcs = tx_info_priv->rcs;
- /* Fill rate */
+ /* Check if min rates have to be used */
+ use_minrate = check_min_rate(skb);
- fill_min_rates(skb, txctl);
+ if (ieee80211_is_data(fc) && !use_minrate) {
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ rcs[0].rix =
+ ath_tx_findindex(rt, tx_info_priv->min_rate);
+ /* mcast packets are not re-tried */
+ rcs[0].tries = 1;
+ }
+ } else {
+ /* For management and control frames,
+ * or for NULL and EAPOL frames */
+ if (use_minrate)
+ rcs[0].rix = ath_rate_findrateix(sc, tx_info_priv->min_rate);
+ else
+ rcs[0].rix = 0;
+ rcs[0].tries = ATH_MGT_TXMAXTRY;
+ }
- /* Fill flags */
+ rix = rcs[0].rix;
- txctl->flags |= ATH9K_TXDESC_CLRDMASK /* needed for crypto errors */
- | ATH9K_TXDESC_INTREQ; /* Generate an interrupt */
+ if (ieee80211_has_morefrags(fc) ||
+ (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
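+ /* Force hardware to use computed duration for next
+ * fragment by disabling multi-rate retry, which
+ * updates duration based on the multi-rate
+ * duration table. */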
+ rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
+ rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
+ /* reset tries but keep rate index */
+ rcs[0].tries = ATH_TXMAXTRY;
+ }
+}
- if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
- txctl->flags |= ATH9K_TXDESC_NOACK;
+/* Called only when tx aggregation is enabled and HT is supported */
- if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- txctl->flags |= ATH9K_TXDESC_RTSENA;
+static void assign_aggr_tid_seqno(struct sk_buff *skb,
+ struct ath_buf *bf)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ath_node *an;
+ struct ath_atx_tid *tid;
+ __le16 fc;
+ u8 *qc;
- /*
- * Setup for rate calculations.
- */
+ if (!tx_info->control.sta)
+ return;
- /* XXX: HACK! */
- tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
- rcs = tx_info_priv->rcs;
+ an = (struct ath_node *)tx_info->control.sta->drv_priv;
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
- if (ieee80211_is_data(fc) && !txctl->use_minrate) {
+ /* Get tidno */
- /* Enable HT only for DATA frames and not for EAPOL */
- /* XXX why AMPDU only?? */
- txctl->ht = (hw->conf.ht.enabled &&
- (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ bf->bf_tidno = qc[0] & 0xf;
+ }
- if (is_multicast_ether_addr(hdr->addr1)) {
- rcs[0].rix = (u8)
- ath_tx_findindex(rt, txctl->mcast_rate);
+ /* Get seqno */
- /*
- * mcast packets are not re-tried.
- */
- rcs[0].tries = 1;
- }
+ if (ieee80211_is_data(fc) && !check_min_rate(skb)) {
/* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
*
- * First, the fragmentation stat is determined.
* If fragmentation is on, the sequence number is
* not overridden, since it has been
* incremented by the fragmentation routine.
+ *
+ * FIXME: check if the fragmentation threshold exceeds
+ * IEEE80211 max.
*/
- if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
- txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
- struct ath_atx_tid *tid;
+ tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
+ IEEE80211_SEQ_SEQ_SHIFT);
+ bf->bf_seqno = tid->seq_next;
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+ }
+}
- tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
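+/* Compute the tx descriptor flags to use for this frame */
+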
+static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_txq *txq)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ int flags = 0;
- hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
- IEEE80211_SEQ_SEQ_SHIFT);
- txctl->seqno = tid->seq_next;
- INCR(tid->seq_next, IEEE80211_SEQ_MAX);
- }
- } else {
- /* for management and control frames,
- * or for NULL and EAPOL frames */
- if (txctl->min_rate)
- rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
- else
- rcs[0].rix = 0;
- rcs[0].tries = ATH_MGT_TXMAXTRY;
- }
- rix = rcs[0].rix;
+ flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+ flags |= ATH9K_TXDESC_INTREQ; /* generate an interrupt */
- if (ieee80211_has_morefrags(fc) ||
- (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
- /*
- ** Force hardware to use computed duration for next
- ** fragment by disabling multi-rate retry, which
- ** updates duration based on the multi-rate
- ** duration table.
- */
- rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
- rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
- /* reset tries but keep rate index */
- rcs[0].tries = ATH_TXMAXTRY;
- }
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ flags |= ATH9K_TXDESC_NOACK;
+ if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ flags |= ATH9K_TXDESC_RTSENA;
+
+ return flags;
+}
- if (is_multicast_ether_addr(hdr->addr1)) {
- antenna = sc->sc_mcastantenna + 1;
- sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
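+/* Take a buffer off the tx free list, returning NULL if none is available */
+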
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
+{
+ struct ath_buf *bf = NULL;
+
+ spin_lock_bh(&sc->sc_txbuflock);
+
+ if (unlikely(list_empty(&sc->sc_txbuf))) {
+ spin_unlock_bh(&sc->sc_txbuflock);
+ return NULL;
}
- return 0;
+ bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+ list_del(&bf->list);
+
+ spin_unlock_bh(&sc->sc_txbuflock);
+
+ return bf;
}
/* To complete a chain of buffers associated with a frame */
skb->len,
PCI_DMA_TODEVICE);
/* complete this frame */
- ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
+ ath_tx_complete(sc, skb, &tx_status);
/*
* Return the list of ath_buf of this mpdu to free queue
u32 ctsduration = 0;
u8 rix = 0, cix, ctsrate = 0;
u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
- struct ath_node *an = (struct ath_node *) bf->bf_node;
+ struct ath_node *an = NULL;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+
+ skb = (struct sk_buff *)bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.sta)
+ an = (struct ath_node *)tx_info->control.sta->drv_priv;
/*
* get the cix for the lowest valid rix.
* use RTS.
*/
if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
- BUG_ON(!an);
/*
* 802.11g protection not needed, use our default behavior
*/
* For dynamic MIMO PS, RTS needs to precede the first aggregate
* and the second aggregate should not have any protection at all.
*/
- if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
+ if (an && an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
if (!bf_isaggrburst(bf)) {
flags = ATH9K_TXDESC_RTSENA;
dynamic_mimops = 1;
(bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
bf_isshpreamble(bf));
- if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
+ if (an && (an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
(bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
/*
* When sending to an HT node that has enabled static
struct list_head *bf_q,
int txok)
{
- struct ath_node *an = bf->bf_node;
- struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ struct ath_node *an = NULL;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_last = bf->bf_lastbf;
struct ath_desc *ds = bf_last->bf_desc;
struct ath_buf *bf_next, *bf_lastq = NULL;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
+ skb = (struct sk_buff *)bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.sta) {
+ an = (struct ath_node *)tx_info->control.sta->drv_priv;
+ tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ }
+
isaggr = bf_isaggr(bf);
if (isaggr) {
if (txok) {
/* copy descriptor content */
tbf->bf_mpdu = bf_last->bf_mpdu;
- tbf->bf_node = bf_last->bf_node;
tbf->bf_buf_addr = bf_last->bf_buf_addr;
*(tbf->bf_desc) = *(bf_last->bf_desc);
*/
static int ath_tx_send_ampdu(struct ath_softc *sc,
- struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_head,
struct ath_tx_control *txctl)
bf = list_first_entry(bf_head, struct ath_buf, list);
bf->bf_state.bf_type |= BUF_AMPDU;
- bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
- bf->bf_tidno = txctl->tidno;
/*
* Do not queue to h/w when any of the following conditions is true:
*/
if (!list_empty(&tid->buf_q) || tid->paused ||
!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
- txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+ txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
list_splice_tail_init(bf_head, &tid->buf_q);
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(txctl->txq, tid);
return 0;
}
bf->bf_nframes = 1;
bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
ath_buf_set_rate(sc, bf);
- ath_tx_txqaddbuf(sc, txq, bf_head);
+ ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
return 0;
}
}
}
-static int ath_tx_start_dma(struct ath_softc *sc,
- struct sk_buff *skb,
- struct scatterlist *sg,
- u32 n_sg,
- struct ath_tx_control *txctl)
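+/* Fill in an ath_buf with frame length and type, crypto key, rate series,
+ * aggregation tidno/seqno and the DMA mapping of the skb */
+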
+static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
+ struct sk_buff *skb, struct scatterlist *sg,
+ struct ath_tx_control *txctl)
{
- struct ath_node *an = txctl->an;
- struct ath_buf *bf = NULL;
- struct list_head bf_head;
- struct ath_desc *ds;
- struct ath_hal *ah = sc->sc_ah;
- struct ath_txq *txq;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_tx_info_priv *tx_info_priv;
struct ath_rc_series *rcs;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- __le16 fc = hdr->frame_control;
-
- if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
- txq = sc->sc_cabq;
- else
- txq = &sc->sc_txq[txctl->qnum];
+ int hdrlen;
+ __le16 fc;
- /* For each sglist entry, allocate an ath_buf for DMA */
- INIT_LIST_HEAD(&bf_head);
- spin_lock_bh(&sc->sc_txbuflock);
- if (unlikely(list_empty(&sc->sc_txbuf))) {
- spin_unlock_bh(&sc->sc_txbuflock);
- return -ENOMEM;
- }
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ rcs = tx_info_priv->rcs;
- bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
- list_del(&bf->list);
- spin_unlock_bh(&sc->sc_txbuflock);
+ ATH_TXBUF_RESET(bf);
- list_add_tail(&bf->list, &bf_head);
+ /* Frame type */
- /* set up this buffer */
- ATH_TXBUF_RESET(bf);
- bf->bf_frmlen = txctl->frmlen;
+ bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
ieee80211_is_data(fc) ?
(bf->bf_state.bf_type |= BUF_DATA) :
(bf->bf_state.bf_type &= ~BUF_DATA);
(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
+ (sc->hw->conf.ht.enabled &&
+ (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
+ (bf->bf_state.bf_type |= BUF_HT) :
+ (bf->bf_state.bf_type &= ~BUF_HT);
+
+ bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+
+ /* Crypto */
+
+ bf->bf_keytype = get_hw_crypto_keytype(skb);
+
+ if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
+ bf->bf_frmlen += tx_info->control.hw_key->icv_len;
+ bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
+ } else {
+ bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
+ }
+
+ /* Rate series */
+
+ setup_rate_retries(sc, skb);
- bf->bf_flags = txctl->flags;
- bf->bf_keytype = txctl->keytype;
- /* XXX: HACK! */
- tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
- rcs = tx_info_priv->rcs;
bf->bf_rcs[0] = rcs[0];
bf->bf_rcs[1] = rcs[1];
bf->bf_rcs[2] = rcs[2];
bf->bf_rcs[3] = rcs[3];
- bf->bf_node = an;
+
+ /* Assign seqno, tidno */
+
+ if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
+ assign_aggr_tid_seqno(skb, bf);
+
+ /* DMA setup */
+
bf->bf_mpdu = skb;
- bf->bf_buf_addr = sg_dma_address(sg);
+ bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ bf->bf_buf_addr = bf->bf_dmacontext;
+}
+
+/* FIXME: tx power */
+static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
+ struct scatterlist *sg, u32 n_sg,
+ struct ath_tx_control *txctl)
+{
+ struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_node *an = NULL;
+ struct list_head bf_head;
+ struct ath_desc *ds;
+ struct ath_atx_tid *tid = NULL;
+ struct ath_hal *ah = sc->sc_ah;
+ int frm_type;
+
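+ /* Look up the destination node and TID state, if a station is known */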
+ if (tx_info->control.sta) {
+ an = (struct ath_node *)tx_info->control.sta->drv_priv;
+ tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ }
+
+ frm_type = get_hw_packet_type(skb);
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
/* setup descriptor */
+
ds = bf->bf_desc;
ds->ds_link = 0;
ds->ds_data = bf->bf_buf_addr;
- /*
- * Save the DMA context in the first ath_buf
- */
- bf->bf_dmacontext = txctl->dmacontext;
+ /* Formulate first tx descriptor with tx controls */
- /*
- * Formulate first tx descriptor with tx controls.
- */
- ath9k_hw_set11n_txdesc(ah,
- ds,
- bf->bf_frmlen, /* frame length */
- txctl->atype, /* Atheros packet type */
- min(txctl->txpower, (u16)60), /* txpower */
- txctl->keyix, /* key cache index */
- txctl->keytype, /* key type */
- txctl->flags); /* flags */
- ath9k_hw_filltxdesc(ah,
- ds,
- sg_dma_len(sg), /* segment length */
- true, /* first segment */
- (n_sg == 1) ? true : false, /* last segment */
- ds); /* first descriptor */
+ ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
+ bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
+
+ ath9k_hw_filltxdesc(ah, ds,
+ sg_dma_len(sg), /* segment length */
+ true, /* first segment */
+ (n_sg == 1) ? true : false, /* last segment */
+ ds); /* first descriptor */
bf->bf_lastfrm = bf;
- (txctl->ht) ?
- (bf->bf_state.bf_type |= BUF_HT) :
- (bf->bf_state.bf_type &= ~BUF_HT);
- spin_lock_bh(&txq->axq_lock);
+ spin_lock_bh(&txctl->txq->axq_lock);
- if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
- struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
- if (ath_aggr_query(sc, an, txctl->tidno)) {
+ if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR)) {
+ if (ath_aggr_query(sc, an, bf->bf_tidno)) {
/*
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
*/
- ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
+ ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
} else {
/*
- * Send this frame as regular when ADDBA exchange
- * is neither complete nor pending.
+ * Send this frame as regular when ADDBA
+ * exchange is neither complete nor pending.
*/
- ath_tx_send_normal(sc, txq, tid, &bf_head);
+ ath_tx_send_normal(sc, txctl->txq,
+ tid, &bf_head);
}
} else {
bf->bf_lastbf = bf;
bf->bf_nframes = 1;
- ath_buf_set_rate(sc, bf);
-
- if (ieee80211_is_back_req(fc)) {
- /* This is required for resuming tid
- * during BAR completion */
- bf->bf_tidno = txctl->tidno;
- }
- ath_tx_txqaddbuf(sc, txq, &bf_head);
+ ath_buf_set_rate(sc, bf);
+ ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}
- spin_unlock_bh(&txq->axq_lock);
- return 0;
+
+ spin_unlock_bh(&txctl->txq->axq_lock);
}
-static void xmit_map_sg(struct ath_softc *sc,
- struct sk_buff *skb,
- struct ath_tx_control *txctl)
+int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
{
- struct ath_xmit_status tx_status;
- struct ath_atx_tid *tid;
+ struct ath_buf *bf;
struct scatterlist sg;
- txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ /* Check if a tx buffer is available */
+
+ bf = ath_tx_get_buffer(sc);
+ if (!bf) {
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
+ __func__);
+ return -1;
+ }
+
+ ath_tx_setup_buffer(sc, bf, skb, &sg, txctl);
+
+ /* Setup S/G */
- /* setup S/G list */
memset(&sg, 0, sizeof(struct scatterlist));
- sg_dma_address(&sg) = txctl->dmacontext;
+ sg_dma_address(&sg) = bf->bf_dmacontext;
sg_dma_len(&sg) = skb->len;
- if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
- /*
- * We have to do drop frame here.
- */
- pci_unmap_single(sc->pdev, txctl->dmacontext,
- skb->len, PCI_DMA_TODEVICE);
-
- tx_status.retries = 0;
- tx_status.flags = ATH_TX_ERROR;
+ ath_tx_start_dma(sc, bf, &sg, 1, txctl);
- if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
- /* Reclaim the seqno. */
- tid = ATH_AN_2_TID((struct ath_node *)
- txctl->an, txctl->tidno);
- DECR(tid->seq_next, IEEE80211_SEQ_MAX);
- }
- ath_tx_complete(sc, skb, &tx_status, txctl->an);
- }
+ return 0;
}
/* Initialize TX queue and h/w */
return qnum;
}
+/* Get a transmit queue, if available */
+
+struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ath_txq *txq = NULL;
+ int qnum;
+
+ qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+ txq = &sc->sc_txq[qnum];
+
+ spin_lock_bh(&txq->axq_lock);
+
+ /* Try to avoid running out of descriptors */
+ if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: TX queue: %d is full, depth: %d\n",
+ __func__, qnum, txq->axq_depth);
+ ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
+ txq->stopped = 1;
+ spin_unlock_bh(&txq->axq_lock);
+ return NULL;
+ }
+
+ spin_unlock_bh(&txq->axq_lock);
+
+ return txq;
+}
+
/* Update parameters for a transmit queue */
int ath_txq_update(struct ath_softc *sc, int qnum,
return 0;
}
-int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ath_tx_control txctl;
- int error = 0;
-
- memset(&txctl, 0, sizeof(struct ath_tx_control));
- error = ath_tx_prepare(sc, skb, &txctl);
- if (error == 0)
- /*
- * Start DMA mapping.
- * ath_tx_start_dma() will be called either synchronously
- * or asynchrounsly once DMA is complete.
- */
- xmit_map_sg(sc, skb, &txctl);
-
- /* failed packets will be dropped by the caller */
- return error;
-}
-
/* Deferred processing of transmit interrupt */
void ath_tx_tasklet(struct ath_softc *sc)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_tx_control txctl;
+ memset(&txctl, 0, sizeof(struct ath_tx_control));
+
/*
* As a temporary workaround, assign seq# here; this will likely need
* to be cleaned up to work better with Beacon transmission and virtual
memmove(skb->data, skb->data + padsize, hdrlen);
}
+ txctl.txq = sc->sc_cabq;
+
DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
__func__,
skb);
- memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.flags = ATH9K_TXDESC_CAB;
- if (ath_tx_prepare(sc, skb, &txctl) == 0) {
- /*
- * Start DMA mapping.
- * ath_tx_start_dma() will be called either synchronously
- * or asynchrounsly once DMA is complete.
- */
- xmit_map_sg(sc, skb, &txctl);
- } else {
- DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
- dev_kfree_skb_any(skb);
+ if (ath_tx_start(sc, skb, &txctl) != 0) {
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
+ goto exit;
}
-}
+ return;
+exit:
+ dev_kfree_skb_any(skb);
+}