return skb;
}
-static void ath_rx_requeue(struct ath_softc *sc, struct ath_buf *bf)
-{
- struct sk_buff *skb;
-
- ASSERT(bf != NULL);
-
- if (bf->bf_mpdu == NULL) {
- skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
- if (skb != NULL) {
- bf->bf_mpdu = skb;
- bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
- skb_end_pointer(skb) - skb->head,
- PCI_DMA_FROMDEVICE);
- bf->bf_dmacontext = bf->bf_buf_addr;
-
- }
- }
-
- list_move_tail(&bf->list, &sc->sc_rxbuf);
- ath_rx_buf_link(sc, bf);
-}
-
-
static int ath_rate2idx(struct ath_softc *sc, int rate)
{
int i = 0, cur_band, n_rates;
struct ath_buf *bf;
struct ath_desc *ds;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = NULL, *requeue_skb;
struct ieee80211_rx_status rx_status;
struct ath_hal *ah = sc->sc_ah;
struct ieee80211_hdr *hdr;
* chain it back at the queue without processing it.
*/
if (flush)
- goto rx_next;
+ goto requeue;
if (!ds->ds_rxstat.rs_datalen)
- goto rx_next;
+ goto requeue;
/* The status portion of the descriptor could get corrupted. */
if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
- goto rx_next;
+ goto requeue;
if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
- goto rx_next;
+ goto requeue;
+
+ /* Ensure we always have an skb to requeue once we are done
+ * processing the current buffer's skb */
+ requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+
+ /* If there is no memory, ignore the current RX'd frame; tell the
+ * hardware it can give us a new frame using the old skb, and put
+ * the buffer back at the tail of the sc->sc_rxbuf list for
+ * processing. */
+ if (!requeue_skb)
+ goto requeue;
/* Sync and unmap the frame */
pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr,
/* Send the frame to mac80211 */
__ieee80211_rx(sc->hw, skb, &rx_status);
- bf->bf_mpdu = NULL;
+
+ /* We will now give the hardware our newly allocated skb */
+ bf->bf_mpdu = requeue_skb;
+ bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
+ sc->sc_rxbufsize,
+ PCI_DMA_FROMDEVICE);
+ bf->bf_dmacontext = bf->bf_buf_addr;
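+
+ /* bf now owns the freshly allocated skb and its DMA mapping; the
+ * buffer is handed back to the hardware when it is requeued at the
+ * bottom of the loop. */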
/*
* change the default rx antenna if rx diversity chooses the
} else {
sc->sc_rxotherant = 0;
}
-rx_next:
- ath_rx_requeue(sc, bf);
+requeue:
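+ /* Move the buffer to the tail of the rx list and link its
+ * descriptor back into the rx chain so the hardware can fill it. */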
+ list_move_tail(&bf->list, &sc->sc_rxbuf);
+ ath_rx_buf_link(sc, bf);
} while (1);
spin_unlock_bh(&sc->sc_rxbuflock);