  }
  static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
- 			     struct iwl_tfd *tfd)
 -			     struct iwl_tfd *tfd, int dma_dir)
++			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
  {
 -	struct pci_dev *dev = priv->pci_dev;
  	int i;
  	int num_tbs;
  	/* Sanity check on number of chunks */
  	num_tbs = iwl_tfd_get_num_tbs(tfd);
  	/* Unmap chunks, if any. */
  	for (i = 1; i < num_tbs; i++)
 -		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
 +		dma_unmap_single(priv->bus.dev, iwl_tfd_tb_get_addr(tfd, i),
- 				iwl_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);
+ 				iwl_tfd_tb_get_len(tfd, i), dma_dir);
  }
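This first hunk is the core of the merge, shown in combined-diff form: the first marker column tracks the parent that had already been converted to the generic DMA API (dma_* calls on priv->bus.dev, with hard-coded directions), the second column tracks the parent that had added a direction argument on top of the old pci_* wrappers, and ++ lines exist in neither parent, only in the resolution, which keeps both changes and types the argument as enum dma_data_direction. The loop starts at i = 1 because TB 0 holds the TX command and is unmapped separately. The invariant the direction argument preserves is that dma_unmap_single() must mirror the device, size, and direction that were given to dma_map_single(). A minimal sketch of that pairing, assuming a generic struct device *dev and a driver-owned buffer; names are illustrative, not from iwlagn:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map a buffer the device will only read; returns 0 or -ENOMEM. */
    static int example_map_tx(struct device *dev, void *buf, size_t len,
                              dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *dma))
                    return -ENOMEM;
            return 0;
    }

    static void example_unmap_tx(struct device *dev, dma_addr_t dma, size_t len)
    {
            /* Must mirror the direction used at map time. */
            dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
    }

Passing the wrong direction at unmap time is a real bug on non-coherent architectures, which is why the merged helper threads dma_dir through from the callers instead of guessing.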
  	struct iwl_tfd *tfd_tmp = txq->tfds;
  	int index = txq->q.read_ptr;
- 	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
+ 	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
 -			 PCI_DMA_TODEVICE);
++			 DMA_TO_DEVICE);
  	/* free SKB */
  	if (txq->txb) {
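Frame payloads are only read by the device, so the TFD reclaim path above unmaps with DMA_TO_DEVICE. The constant swap is mechanical: the pci_* DMA helpers are thin shims over the generic API, and the PCI_DMA_* direction values match the enum dma_data_direction values one to one. A simplified sketch of the shape of the compat shim being left behind (modelled on the pci-dma-compat layer, not code from this driver):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Simplified shape of the legacy wrapper the merge moves away from. */
    static inline dma_addr_t compat_pci_map_single(struct pci_dev *hwdev,
                                                   void *ptr, size_t size,
                                                   int direction)
    {
            return dma_map_single(&hwdev->dev, ptr, size,
                                  (enum dma_data_direction)direction);
    }

Because the wrapper only forwards to the generic call, dropping it and naming the device explicitly changes no behavior; it just removes the PCI-only indirection from a driver that now sits behind a bus abstraction.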
  		i = get_cmd_index(q, q->read_ptr);
  		if (txq->meta[i].flags & CMD_MAPPED) {
- 			iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i]);
+ 			iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
 -					 PCI_DMA_BIDIRECTIONAL);
++					 DMA_BIDIRECTIONAL);
  			txq->meta[i].flags = 0;
  		}
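Host command buffers differ from frames in that the device can write back into them, so the command-queue teardown above unmaps DMA_BIDIRECTIONAL, and it does so only when CMD_MAPPED is set, clearing the flag afterwards so a second pass over the queue is a no-op. A guarded-teardown sketch of that idea; CMD_MAPPED mirrors the driver's flag, everything else here is an illustrative stand-in:

    #include <linux/types.h>
    #include <linux/dma-mapping.h>

    #define EXAMPLE_CMD_MAPPED 0x01	/* stand-in for the driver's CMD_MAPPED */

    struct example_cmd_meta {
            u32 flags;
            dma_addr_t dma;
            size_t len;
    };

    static void example_cmd_teardown(struct device *dev,
                                     struct example_cmd_meta *meta)
    {
            /* Unmap only what was actually mapped; clearing the flag makes
             * a second call (e.g. from a later queue-free path) a no-op. */
            if (meta->flags & EXAMPLE_CMD_MAPPED) {
                    dma_unmap_single(dev, meta->dma, meta->len,
                                     DMA_BIDIRECTIONAL);
                    meta->flags = 0;
            }
    }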
  			continue;
  		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
  			continue;
 -		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
 -					   cmd->len[i], PCI_DMA_BIDIRECTIONAL);
 -		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
 +		phys_addr = dma_map_single(priv->bus.dev, (void *)cmd->data[i],
- 					   cmd->len[i], DMA_TO_DEVICE);
++					   cmd->len[i], DMA_BIDIRECTIONAL);
 +		if (dma_mapping_error(priv->bus.dev, phys_addr)) {
  			iwlagn_unmap_tfd(priv, out_meta,
- 					 &txq->tfds[q->write_ptr]);
+ 					 &txq->tfds[q->write_ptr],
 -					 PCI_DMA_BIDIRECTIONAL);
++					 DMA_BIDIRECTIONAL);
  			idx = -ENOMEM;
  			goto out;
  		}
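This hunk maps a host command's NOCOPY chunks, and two changes land at once: pci_map_single()/pci_dma_mapping_error() become their generic equivalents, and the direction becomes bidirectional. One parent had made the mapping bidirectional while still using the PCI constant, the other had already moved to dma_map_single() but still mapped to-device, so the ++ line is where the resolution carries both. On a mapping failure the error path unmaps whatever was already attached to the TFD before returning -ENOMEM, with a direction matching how those chunks were mapped. The general map-with-rollback shape, sketched over a plain array of buffers; all names are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map n buffers bidirectionally; on failure, unmap the ones that
     * succeeded and report -ENOMEM, leaving no stale mappings behind. */
    static int example_map_all(struct device *dev, void **bufs,
                               const size_t *lens, dma_addr_t *dmas, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    dmas[i] = dma_map_single(dev, bufs[i], lens[i],
                                             DMA_BIDIRECTIONAL);
                    if (dma_mapping_error(dev, dmas[i]))
                            goto unwind;
            }
            return 0;

    unwind:
            /* Roll back only the mappings that did succeed. */
            while (--i >= 0)
                    dma_unmap_single(dev, dmas[i], lens[i], DMA_BIDIRECTIONAL);
            return -ENOMEM;
    }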
  	cmd = txq->cmd[cmd_index];
  	meta = &txq->meta[cmd_index];
- 	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
 -	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
++	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
  	/* Input error checking is done when commands are added to queue. */
  	if (meta->flags & CMD_WANT_SKB) {
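The completion path unmaps with DMA_BIDIRECTIONAL to match the mapping made at enqueue time; for a streaming mapping, the unmap is what hands ownership of the buffer back to the CPU, so the firmware's response is only read after this point. Drivers that keep a long-lived mapping instead perform the same handover with the sync calls; a small sketch with illustrative names:

    #include <linux/dma-mapping.h>

    /* Ownership round-trip on a long-lived streaming mapping: let the CPU
     * read what the device wrote, then hand the buffer back to the device. */
    static void example_cpu_peek(struct device *dev, dma_addr_t dma,
                                 const void *cpu_buf, size_t len)
    {
            dma_sync_single_for_cpu(dev, dma, len, DMA_BIDIRECTIONAL);

            /* ... the CPU may now safely read cpu_buf ... */

            dma_sync_single_for_device(dev, dma, len, DMA_BIDIRECTIONAL);
    }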