struct sa1100_buf {
struct sk_buff *skb;
- dma_addr_t dma;
+ struct scatterlist sg;
dma_regs_t *regs;
};
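The per-channel buffer descriptor now carries a one-entry struct scatterlist instead of a bare dma_addr_t, so every streaming mapping below goes through the sg variants of the DMA API. A minimal sketch of that pattern (illustrative only, not part of the patch; dev, buf and len stand in for the driver's real state, and program_hw() is a made-up placeholder for sa1100_start_dma()):

        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>

        struct scatterlist sg;

        sg_init_table(&sg, 1);                  /* one-time init of the single entry */
        sg_set_buf(&sg, buf, len);              /* aim it at this transfer's buffer */
        if (dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE) == 0)
                return -ENOMEM;                 /* 0 mapped entries means failure */
        program_hw(sg_dma_address(&sg), sg_dma_len(&sg));
        /* ... transfer completes ... */
        dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);     /* same nents as the map call */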
*/
skb_reserve(si->dma_rx.skb, 1);
- si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data,
- HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(si->dev, si->dma_rx.dma)) {
+ sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
+ if (dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
dev_kfree_skb_any(si->dma_rx.skb);
return -ENOMEM;
}
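Note the changed failure test: dma_map_single() hands back a dma_addr_t that has to be probed with dma_mapping_error(), whereas dma_map_sg() returns the number of entries it actually mapped, with 0 meaning the mapping failed outright, so there is nothing to unmap before freeing the skb and bailing out. Side by side, with dev, buf and len as placeholders and sg set up as in the sketch above:

        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr))       /* old convention: probe the handle */
                return -ENOMEM;

        if (dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE) == 0)      /* new convention */
                return -ENOMEM;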
* Enable the DMA, receiver and receive interrupt.
*/
sa1100_clear_dma(si->dma_rx.regs);
- sa1100_start_dma(si->dma_rx.regs, si->dma_rx.dma, HPSIR_MAX_RXLEN);
+ sa1100_start_dma(si->dma_rx.regs, sg_dma_address(&si->dma_rx.sg),
+ sg_dma_len(&si->dma_rx.sg));
Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
}
/* Account and free the packet. */
skb = si->dma_tx.skb;
if (skb) {
- dma_unmap_single(si->dev, si->dma_tx.dma, skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_sg(si->dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
dev->stats.tx_packets ++;
dev->stats.tx_bytes += skb->len;
dev_kfree_skb_irq(skb);
int mtt = irda_get_mtt(skb);
si->dma_tx.skb = skb;
- si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(si->dev, si->dma_tx.dma)) {
+ sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
+ if (dma_map_sg(si->dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
si->dma_tx.skb = NULL;
netif_wake_queue(dev);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len);
+ sa1100_start_dma(si->dma_tx.regs, sg_dma_address(&si->dma_tx.sg),
+ sg_dma_len(&si->dma_tx.sg));
/*
* If we have a mean turn-around time, impose the specified
* Get the current data position.
*/
dma_addr = sa1100_get_dma_pos(si->dma_rx.regs);
- len = dma_addr - si->dma_rx.dma;
+ len = dma_addr - sg_dma_address(&si->dma_rx.sg);
if (len > HPSIR_MAX_RXLEN)
len = HPSIR_MAX_RXLEN;
- dma_unmap_single(si->dev, si->dma_rx.dma, len, DMA_FROM_DEVICE);
+ dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
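dma_unmap_sg() takes the scatterlist and the nents originally passed to dma_map_sg(), not a byte count, so the old partial unmap of just len bytes becomes an unmap of the whole HPSIR_MAX_RXLEN mapping; the len computed above still bounds how much data actually arrived. Roughly:

        dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);  /* whole entry */
        /* ... the surrounding (elided) code then consumes the first len bytes ... */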
do {
/*
* Remap the buffer - it was previously mapped, and we
* hope that this succeeds.
*/
- si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data,
- HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
+ dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
}
}
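Because the scatterlist set up earlier still points at the same skb data, remapping only needs another dma_map_sg() call; as with the dma_map_single() it replaces, the return value is deliberately not checked here, per the existing comment. A stricter, purely hypothetical variant would at least log the failure:

        if (dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0)
                dev_err(si->dev, "rx DMA remap failed\n");  /* hypothetical handling */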
*/
skb = si->dma_rx.skb;
if (skb) {
- dma_unmap_single(si->dev, si->dma_rx.dma, HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
+ dma_unmap_sg(si->dev, &si->dma_rx.sg, 1,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
si->dma_rx.skb = NULL;
}
skb = si->dma_tx.skb;
if (skb) {
- dma_unmap_single(si->dev, si->dma_tx.dma, skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_sg(si->dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
si->dma_tx.skb = NULL;
}
si->dev = &pdev->dev;
si->pdata = pdev->dev.platform_data;
+ sg_init_table(&si->dma_rx.sg, 1);
+ sg_init_table(&si->dma_tx.sg, 1);
+
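Probe-time setup only has to call sg_init_table() once per one-entry list; the buffer address is filled in later with sg_set_buf() because the skb, and hence the data pointer, changes across opens and transmissions. When the buffer is already known at init time, the two steps collapse into sg_init_one(), e.g. (illustrative, not used by this driver):

        sg_init_one(&sg, buf, len);  /* == sg_init_table(&sg, 1) + sg_set_buf(&sg, buf, len) */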
/*
* Initialise the HP-SIR buffers
*/