*/
if (desc->group_head && desc->unmap_len) {
struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev =
- &mv_chan->device->pdev->dev;
+ struct device *dev = mv_chan_to_devp(mv_chan);
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
{
struct dma_chan *chan, *_chan;
struct mv_xor_chan *mv_chan;
+ struct device *dev = device->common.dev;
dma_async_device_unregister(&device->common);
- dma_free_coherent(&device->pdev->dev, device->pool_size,
+ dma_free_coherent(dev, device->pool_size,
device->dma_desc_pool_virt, device->dma_desc_pool);
list_for_each_entry_safe(chan, _chan, &device->common.channels,
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
- adev->pdev = pdev;
adev->shared = msp;
INIT_LIST_HEAD(&dma_dev->channels);
return adev;
err_free_dma:
- dma_free_coherent(&adev->pdev->dev, pool_size,
+ dma_free_coherent(&pdev->dev, pool_size,
adev->dma_desc_pool_virt, adev->dma_desc_pool);
return ERR_PTR(ret);
}
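
For context: the mv_chan_to_devp() helper used in the first hunk appears to be a thin accessor that resolves a channel to the struct device embedded in the shared dma_device, i.e. the same pointer the removed &mv_chan->device->pdev->dev expression used to yield (the second hunk reads it directly as device->common.dev). A minimal sketch under that assumption, with field lists elided; the real definitions live in the driver itself:

#include <linux/dmaengine.h>	/* struct dma_device */

/* Sketch only: approximate shapes inferred from the hunks above. */
struct mv_xor_device {
	struct dma_device common;	/* common.dev is the struct device handed to the DMA API */
	/* ... ; the pdev pointer is what this change removes */
};

struct mv_xor_chan {
	struct mv_xor_device *device;
	/* ... */
};

/* Assumed accessor: channel -> struct device, replacing &chan->device->pdev->dev. */
#define mv_chan_to_devp(chan)	((chan)->device->common.dev)

With the platform_device pointer gone from struct mv_xor_device, the DMA API calls (including the dma_free_coherent() calls above) all go through this single struct device, and the probe error path can take &pdev->dev from its own argument directly.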