int i;
re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
- return -EIO;
+ if (pci_dma_mapping_error(pdev, re->data_addr))
+ goto mapping_error;
pci_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- re->frag_addr[i] = pci_map_page(pdev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
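+		/* map this paged fragment; a failure unwinds the mappings made so far */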
+ re->frag_addr[i] = pci_map_page(pdev, frag->page,
+ frag->page_offset,
+ frag->size,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+ goto map_page_error;
+ }
return 0;
+
+map_page_error:
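+	/* fragment i failed to map; release fragments 0..i-1 in reverse */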
+ while (--i >= 0) {
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+ }
+
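+	/* the linear data buffer was mapped successfully above, so release it too */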
+ pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+mapping_error:
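+	/* net_ratelimit() keeps repeated mapping failures from flooding the log */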
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
}
static struct sk_buff *receive_new(struct sky2_port *sky2,
struct rx_ring_info *re,
unsigned int length)
{
- struct sk_buff *skb, *nskb;
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
unsigned hdr_space = sky2->rx_data_size;
- /* Don't be tricky about reusing pages (yet) */
- nskb = sky2_rx_alloc(sky2);
- if (unlikely(!nskb))
- return NULL;
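+	/* Set up the replacement buffer first; the old skb stays mapped
+	 * in the ring until its successor is known to be usable.
+	 */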
+ nre.skb = sky2_rx_alloc(sky2);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
skb = re->skb;
sky2_rx_unmap_skb(sky2->hw->pdev, re);
-
prefetch(skb->data);
- re->skb = nskb;
- if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
- dev_kfree_skb(nskb);
- re->skb = skb;
- return NULL;
- }
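+	/* install the freshly mapped replacement into the ring slot */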
+ *re = nre;
if (skb_shinfo(skb)->nr_frags)
skb_put_frags(skb, hdr_space, length);
else
skb_put(skb, length);
return skb;
+
+nomap:
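+	/* only the new skb is freed; *re is untouched, so the old buffer stays usable */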
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
}
/*