struct ring_info {
struct sk_buff *skb;
- dma_addr_t mapping;
u32 len;
};
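/*
 * Minimal, self-contained sketch of the pattern this patch adopts: the
 * hunk above drops the cached mapping field from struct ring_info, so
 * the bus address lives only in the little-endian hardware descriptor
 * and is read back with le64_to_cpu() wherever the driver needs it.
 * This relies on the chip leaving the addr field intact when it
 * completes a descriptor.  fake_desc, the typedefs and the byte-order
 * helpers below are illustrative stand-ins for the kernel's types, not
 * driver code; the helpers assume a little-endian host.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t le64;        /* stand-in for __le64 */
typedef uint64_t dma_addr_t;  /* stand-in for the kernel's dma_addr_t */

struct fake_desc {
	uint32_t opts1;
	uint32_t opts2;
	le64     addr;        /* descriptor field holding the bus address */
};

static le64 cpu_to_le64(uint64_t v) { return v; }   /* identity: LE host */
static uint64_t le64_to_cpu(le64 v) { return v; }   /* identity: LE host */

int main(void)
{
	struct fake_desc desc = {0};
	dma_addr_t mapping = 0x12345678;    /* pretend pci_map_single() result */

	desc.addr = cpu_to_le64(mapping);   /* descriptor setup, as in the patch */
	/* ... device DMA would happen here ... */
	dma_addr_t back = le64_to_cpu(desc.addr);  /* unmap path reads it back */

	printf("round-trip %s\n", back == mapping ? "ok" : "broken");
	return 0;
}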
break;
len = (status & 0x1fff) - 4;
- mapping = cp->rx_skb[rx_tail].mapping;
+ mapping = le64_to_cpu(desc->addr);
if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
/* we don't support incoming fragmented frames. */
skb_put(skb, len);
- mapping =
- cp->rx_skb[rx_tail].mapping =
- pci_map_single(cp->pdev, new_skb->data,
- buflen, PCI_DMA_FROMDEVICE);
+ mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
cp->rx_skb[rx_tail].skb = new_skb;
cp_rx_skb(cp, skb, desc);
unsigned tx_tail = cp->tx_tail;
while (tx_tail != tx_head) {
+ struct cp_desc *txd = cp->tx_ring + tx_tail;
struct sk_buff *skb;
u32 status;
rmb();
- status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+ status = le32_to_cpu(txd->opts1);
if (status & DescOwn)
break;
skb = cp->tx_skb[tx_tail].skb;
BUG_ON(!skb);
- pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
+ pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
if (status & LastFrag) {
wmb();
cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = mapping;
cp->tx_skb[entry].len = len;
entry = NEXT_TX(entry);
} else {
first_mapping = pci_map_single(cp->pdev, skb->data,
first_len, PCI_DMA_TODEVICE);
cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = first_mapping;
cp->tx_skb[entry].len = first_len;
entry = NEXT_TX(entry);
wmb();
cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = mapping;
cp->tx_skb[entry].len = len;
entry = NEXT_TX(entry);
}
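/*
 * Sketch of the ordering the wmb() calls above enforce: every other
 * descriptor field must be visible to the device before the ownership
 * bit that hands the descriptor over.  DESC_OWN, the barrier macro and
 * all names below are illustrative stand-ins, not the driver's symbols.
 */
#include <stdint.h>

#define DESC_OWN (1u << 31)            /* illustrative ownership bit */
#define wmb_()   __sync_synchronize()  /* stand-in for the kernel's wmb() */

struct fake_desc {
	uint32_t opts1;    /* flags, length and the ownership bit */
	uint32_t opts2;
	uint64_t addr;
};

/* Publish one TX descriptor: write everything else first, then the
 * ownership word.  Without the barrier the device could observe
 * DESC_OWN while addr/opts2 are still stale. */
static void publish_desc(volatile struct fake_desc *d,
			 uint64_t bus_addr, uint32_t len, uint32_t flags)
{
	d->addr  = bus_addr;
	d->opts2 = 0;
	wmb_();                            /* order payload before hand-off */
	d->opts1 = DESC_OWN | flags | len;
}

int main(void)
{
	struct fake_desc ring[2] = {0};

	publish_desc(&ring[0], 0x1000, 60, 0);
	return (int)(ring[0].opts1 == 0);  /* 0 on success */
}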
for (i = 0; i < CP_RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
if (!skb)
goto err_out;
skb->dev = cp->dev;
skb_reserve(skb, RX_OFFSET);
- cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
- skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
cp->rx_skb[i].skb = skb;
cp->rx_ring[i].opts2 = 0;
- cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+ cp->rx_ring[i].addr = cpu_to_le64(mapping);
if (i == (CP_RX_RING_SIZE - 1))
cp->rx_ring[i].opts1 =
cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
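/*
 * Sketch of the refill shape above: every slot is handed to the device
 * owned, and the last slot also carries the wrap marker so the DMA
 * engine returns to slot 0.  RING_SIZE, DESC_OWN, RING_END and the
 * other names are illustrative stand-ins for the driver's constants.
 */
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE 64           /* illustrative; driver uses CP_RX_RING_SIZE */
#define DESC_OWN  (1u << 31)   /* illustrative ownership bit */
#define RING_END  (1u << 30)   /* illustrative wrap marker */

struct fake_desc {
	uint32_t opts1;
	uint32_t opts2;
	uint64_t addr;
};

/* Give every RX buffer to the device; mark the last slot so the DMA
 * engine wraps to slot 0 instead of running off the array. */
static void init_rx_ring(struct fake_desc *ring, const uint64_t *bus_addr,
			 uint32_t buf_len)
{
	for (size_t i = 0; i < RING_SIZE; i++) {
		ring[i].opts2 = 0;
		ring[i].addr  = bus_addr[i];
		ring[i].opts1 = DESC_OWN | buf_len;
		if (i == RING_SIZE - 1)
			ring[i].opts1 |= RING_END;
	}
}

int main(void)
{
	static struct fake_desc ring[RING_SIZE];
	static uint64_t addrs[RING_SIZE];  /* pretend bus addresses */

	init_rx_ring(ring, addrs, 1536);
	return 0;
}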
static void cp_clean_rings (struct cp_private *cp)
{
+ struct cp_desc *desc;
unsigned i;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
if (cp->rx_skb[i].skb) {
- pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+ desc = cp->rx_ring + i;
+ pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(cp->rx_skb[i].skb);
}
if (cp->tx_skb[i].skb) {
struct sk_buff *skb = cp->tx_skb[i].skb;
- pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
+ desc = cp->tx_ring + i;
+ pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
cp->tx_skb[i].len, PCI_DMA_TODEVICE);
- if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+ if (le32_to_cpu(desc->opts1) & LastFrag)
dev_kfree_skb(skb);
cp->net_stats.tx_dropped++;
}