return 0;
}

-static int rspi_request_dma(struct rspi_data *rspi,
- struct platform_device *pdev)
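+/*
+ * Request a DMA slave channel for direction @dir, using the shdma channel
+ * matching @id, and configure it to transfer to or from the RSPI data
+ * register at @port_addr. Returns the channel, or NULL on failure.
+ */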
+static struct dma_chan *rspi_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir,
+ unsigned int id,
+ dma_addr_t port_addr)
{
- const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dma_cap_mask_t mask;
+ struct dma_chan *chan;
struct dma_slave_config cfg;
int ret;

+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(unsigned long)id);
+ if (!chan) {
+ dev_warn(dev, "dma_request_channel failed\n");
+ return NULL;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.slave_id = id;
+ cfg.direction = dir;
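+ /* The RSPI data register is the transfer destination for TX, the source for RX */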
+ if (dir == DMA_MEM_TO_DEV)
+ cfg.dst_addr = port_addr;
+ else
+ cfg.src_addr = port_addr;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
+ dma_release_channel(chan);
+ return NULL;
+ }
+
+ return chan;
+}
+
+static int rspi_request_dma(struct rspi_data *rspi,
+ struct platform_device *pdev)
+{
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
if (!res || !rspi_pd)
return 0; /* The driver assumes no error. */
/* If the module receives data by DMAC, it also needs TX DMAC */
if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
- (void *)rspi_pd->dma_rx_id);
- if (rspi->chan_rx) {
- cfg.slave_id = rspi_pd->dma_rx_id;
- cfg.direction = DMA_DEV_TO_MEM;
- cfg.dst_addr = 0;
- cfg.src_addr = res->start + RSPI_SPDR;
- ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
- if (!ret)
- dev_info(&pdev->dev, "Use DMA when rx.\n");
- else
- return ret;
- }
+ rspi->chan_rx = rspi_request_dma_chan(&pdev->dev,
+ DMA_DEV_TO_MEM,
+ rspi_pd->dma_rx_id,
+ res->start + RSPI_SPDR);
+ if (!rspi->chan_rx)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "Use DMA when rx.\n");
}
if (rspi_pd->dma_tx_id) {
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
- (void *)rspi_pd->dma_tx_id);
- if (rspi->chan_tx) {
- cfg.slave_id = rspi_pd->dma_tx_id;
- cfg.direction = DMA_MEM_TO_DEV;
- cfg.dst_addr = res->start + RSPI_SPDR;
- cfg.src_addr = 0;
- ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
- if (!ret)
- dev_info(&pdev->dev, "Use DMA when tx\n");
- else
- return ret;
- }
+ rspi->chan_tx = rspi_request_dma_chan(&pdev->dev,
+ DMA_MEM_TO_DEV,
+ rspi_pd->dma_tx_id,
+ res->start + RSPI_SPDR);
+ if (!rspi->chan_tx)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "Use DMA when tx\n");
}
return 0;