/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
if (ssb_dma_mapping_error(bp->sdev, mapping) ||
- mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
/* Sigh... */
if (!ssb_dma_mapping_error(bp->sdev, mapping))
ssb_dma_unmap_single(bp->sdev, mapping,
RX_PKT_BUF_SZ,
DMA_FROM_DEVICE);
if (ssb_dma_mapping_error(bp->sdev, mapping) ||
- mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
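
For reference, the conversion in these hunks is purely mechanical. A minimal userspace sketch (the macro body is copied from include/linux/dma-mapping.h) showing that DMA_BIT_MASK(30) yields the same 1GB-1 constant the removed DMA_30BIT_MASK macro carried:

#include <assert.h>

/* Same definition as include/linux/dma-mapping.h */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* DMA_30BIT_MASK was defined as 0x000000003fffffffULL, i.e. the
	 * highest address reachable by the chip's 1GB-limited DMA. */
	assert(DMA_BIT_MASK(30) == 0x000000003fffffffULL);
	return 0;
}
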
mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+ if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
struct sk_buff *bounce_skb;
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
len, DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+ if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
if (!ssb_dma_mapping_error(bp->sdev, mapping))
ssb_dma_unmap_single(bp->sdev, mapping,
len, DMA_TO_DEVICE);
DMA_BIDIRECTIONAL);
if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
- rx_ring_dma + size > DMA_30BIT_MASK) {
+ rx_ring_dma + size > DMA_BIT_MASK(30)) {
kfree(rx_ring);
goto out_err;
}
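
For context, the truncated hunk above is the bounce-buffer fall-back in the transmit path: when the first mapping lands above the 1GB limit, the frame is copied into a new skb allocated from the DMA zone and that buffer is mapped instead. A minimal sketch of the pattern (allocation flags and the error label are assumptions; the in-tree b44 code differs in ordering and detail):

	/* Sketch: retry the mapping with a bounce buffer from ZONE_DMA,
	 * which lies well below the 1GB boundary. */
	bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
	if (!bounce_skb)
		goto err_out;			/* assumed error label */

	/* Copy the frame into the bounce skb and hand that one to the chip. */
	skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
	dev_kfree_skb_any(skb);
	skb = bounce_skb;

	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
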
DMA_TO_DEVICE);
if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
- tx_ring_dma + size > DMA_30BIT_MASK) {
+ tx_ring_dma + size > DMA_BIT_MASK(30)) {
kfree(tx_ring);
goto out_err;
}
"Failed to powerup the bus\n");
goto err_out_free_dev;
}
- err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
+ err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
if (err) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system.\n");
if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
return DMA_BIT_MASK(32);
- return DMA_30BIT_MASK;
+ return DMA_BIT_MASK(30);
}
static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
- if (dmamask == DMA_30BIT_MASK)
+ if (dmamask == DMA_BIT_MASK(30))
return B43legacy_DMA_30BIT;
if (dmamask == DMA_BIT_MASK(32))
return B43legacy_DMA_32BIT;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
- mask = DMA_30BIT_MASK;
+ mask = DMA_BIT_MASK(30);
fallback = 1;
continue;
}
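
The fallback hunk above sits in a retry loop that steps the mask down until the platform accepts one. A minimal sketch of that pattern (err, mask and fallback follow the surrounding context; the enclosing function body is assumed):

	/* Sketch: try the widest mask the DMA engine advertises first,
	 * then fall back 64 -> 32 -> 30 bits until ssb_dma_set_mask()
	 * succeeds. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;				/* mask accepted */
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		return -EOPNOTSUPP;			/* nothing smaller to try */
	}
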
if ((err = pci_enable_device(pci)) < 0)
return err;
	/* check if we can restrict PCI DMA transfers to 30 bits */
- if (pci_set_dma_mask(pci, DMA_30BIT_MASK) < 0 ||
- pci_set_consistent_dma_mask(pci, DMA_30BIT_MASK) < 0) {
+ if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0 ||
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(30)) < 0) {
snd_printk(KERN_ERR "architecture does not support 30bit PCI busmaster DMA\n");
pci_disable_device(pci);
return -ENXIO;
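
Once both masks are restricted to 30 bits as above, later coherent allocations for this device honour the limit. A minimal sketch (the buffer size and the snd_dma_alloc_pages() call are illustrative, not taken from this driver):

	struct snd_dma_buffer buf;

	/* Sketch: with the 30-bit coherent mask set above, the DMA address
	 * returned here is guaranteed to lie below the 1GB boundary. */
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
				64 * 1024, &buf) < 0) {
		pci_disable_device(pci);
		return -ENOMEM;
	}
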