bf->bf_buf_addr = bf->bf_dmacontext =
pci_map_single(sc->pdev, skb->data,
skb->len,
PCI_DMA_TODEVICE);
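+ /* a failed DMA mapping yields an invalid bus address; catch it
+ * here so it is never handed to the hardware */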
+ if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "pci_dma_mapping_error() on beaconing\n");
+ return NULL;
+ }
skb = ieee80211_get_buffered_bc(sc->hw, vif);
memcpy(&hdr[1], &val, sizeof(val));
}
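+ /* assign bf_mpdu before the mapping so the error path below can
+ * consistently clear it again */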
+ bf->bf_mpdu = skb;
bf->bf_buf_addr = bf->bf_dmacontext =
pci_map_single(sc->pdev, skb->data,
skb->len,
PCI_DMA_TODEVICE);
- bf->bf_mpdu = skb;
+ if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "pci_dma_mapping_error() on beacon alloc\n");
+ return -ENOMEM;
+ }
return 0;
}
bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
sc->sc_rxbufsize,
PCI_DMA_FROMDEVICE);
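+ /* abort RX descriptor setup if this skb could not be DMA-mapped */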
+ if (unlikely(pci_dma_mapping_error(sc->pdev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "pci_dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ break;
+ }
bf->bf_dmacontext = bf->bf_buf_addr;
}
sc->sc_rxlink = NULL;
bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
sc->sc_rxbufsize,
PCI_DMA_FROMDEVICE);
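+ /* drop the replacement skb and stop requeuing if it cannot be
+ * DMA-mapped */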
+ if (unlikely(pci_dma_mapping_error(sc->pdev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(requeue_skb);
+ bf->bf_mpdu = NULL;
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "pci_dma_mapping_error() on RX\n");
+ break;
+ }
bf->bf_dmacontext = bf->bf_buf_addr;
/*
}
}
-static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
+static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb,
struct ath_tx_control *txctl)
{
/* DMA setup */
bf->bf_mpdu = skb;
+
bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
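+ /* let the caller know the frame could not be DMA-mapped instead
+ * of queueing an unmapped buffer */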
+ if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) {
+ bf->bf_mpdu = NULL;
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "pci_dma_mapping_error() on TX\n");
+ return -ENOMEM;
+ }
+
bf->bf_buf_addr = bf->bf_dmacontext;
+ return 0;
}
/* FIXME: tx power */
spin_unlock_bh(&txctl->txq->axq_lock);
}
+/* Upon failure, the caller should free the skb */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
struct ath_buf *bf;
+ int r;
/* Check if a tx buffer is available */
return -1;
}
- ath_tx_setup_buffer(sc, bf, skb, txctl);
+ r = ath_tx_setup_buffer(sc, bf, skb, txctl);
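+ /* DMA setup failed: return the tx buffer to the free list and
+ * propagate the error; the caller frees the skb */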
+ if (unlikely(r)) {
+ spin_lock_bh(&sc->sc_txbuflock);
+ DPRINTF(sc, ATH_DBG_FATAL, "TX DMA mapping failed\n");
+ list_add_tail(&bf->list, &sc->sc_txbuf);
+ spin_unlock_bh(&sc->sc_txbuflock);
+ return r;
+ }
+
ath_tx_start_dma(sc, bf, txctl);
return 0;