u64 tsf = 0;
u32 tsf_lower = 0;
unsigned long flags;
+ dma_addr_t new_buf_addr;
if (edma)
dma_type = DMA_BIDIRECTIONAL;
@@ ... @@
goto requeue_drop_frag;
}
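+
+ /*
+ * Map the replacement skb before unmapping or delivering the old one, so a
+ * mapping failure can be handled by simply requeuing the still-mapped buffer.
+ */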
+ /* We will now give hardware our shiny new allocated skb */
+ new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
+ common->rx_bufsize, dma_type);
+ if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
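+ /* Old buffer is still mapped; drop the new skb and requeue the old one */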
+ dev_kfree_skb_any(requeue_skb);
+ goto requeue_drop_frag;
+ }
+
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize,
- dma_type);
+ common->rx_bufsize, dma_type);
+
+ /* Point the buffer descriptor at the freshly mapped skb */
+ bf->bf_mpdu = requeue_skb;
+ bf->bf_buf_addr = new_buf_addr;
skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
if (ah->caps.rx_status_len)
ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
rxs, decrypt_error);
- /* We will now give hardware our shiny new allocated skb */
- bf->bf_mpdu = requeue_skb;
- bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
- common->rx_bufsize,
- dma_type);
- if (unlikely(dma_mapping_error(sc->dev,
- bf->bf_buf_addr))) {
- dev_kfree_skb_any(requeue_skb);
- bf->bf_mpdu = NULL;
- bf->bf_buf_addr = 0;
- ath_err(common, "dma_mapping_error() on RX\n");
- ieee80211_rx(hw, skb);
- break;
- }
-
if (rs.rs_more) {
RX_STAT_INC(rx_frags);
/*