From 3eeb5156362bd756859e8c84ceb2c22e1d4ef652 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Sun, 27 Aug 2017 16:55:32 +0530
Subject: [PATCH] dmaengine: remove BUG_ON while registering devices

DMAengine core uses BUG_ON to check for the mandatory operations and for
the ones implied by a claimed capability. Crashing the kernel for a
driver bug is too harsh, so remove the BUG_ONs and instead log the
problem and return an error gracefully.

Acked-by: Dan Williams
Signed-off-by: Vinod Koul
---
 drivers/dma/dmaengine.c | 101 +++++++++++++++++++++++++++++++---------
 1 file changed, 79 insertions(+), 22 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 428b1414263a..b451354735d3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -923,28 +923,85 @@ int dma_async_device_register(struct dma_device *device)
 		return -ENODEV;
 
 	/* validate device routines */
-	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
-		!device->device_prep_dma_memcpy);
-	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
-		!device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
-		!device->device_prep_dma_xor_val);
-	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
-		!device->device_prep_dma_pq);
-	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
-		!device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-		!device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
-		!device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
-		!device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-		!device->device_prep_interleaved_dma);
-
-	BUG_ON(!device->device_tx_status);
-	BUG_ON(!device->device_issue_pending);
-	BUG_ON(!device->dev);
+	if (!device->dev) {
+		pr_err("DMAdevice must have dev\n");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMSET");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERRUPT");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_CYCLIC");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERLEAVE");
+		return -EIO;
+	}
+
+
+	if (!device->device_tx_status) {
+		dev_err(device->dev, "Device tx_status is not defined\n");
+		return -EIO;
+	}
+
+
+	if (!device->device_issue_pending) {
+		dev_err(device->dev, "Device issue_pending is not defined\n");
+		return -EIO;
+	}
 
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
-- 
2.20.1
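
For illustration, here is a minimal, hypothetical caller sketch (not part of
the patch; the foo_* names and the reduced setup are assumptions) showing why
the new behaviour matters to drivers: with this change a device that claims a
capability without providing the matching prep op, or that lacks one of the
mandatory callbacks, gets a dev_err() log and -EIO back from
dma_async_device_register() instead of a BUG_ON crash, so probe paths should
check and propagate the return value.

/* Hypothetical fragment only -- foo_* identifiers are illustrative. */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int foo_dma_probe(struct platform_device *pdev)
{
	struct dma_device *ddev;
	int ret;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	ddev->dev = &pdev->dev;
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	/*
	 * Channel setup and the device_prep_dma_memcpy, device_tx_status and
	 * device_issue_pending callbacks would be filled in here; if one of
	 * them is missing, registration now fails gracefully.
	 */

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(&pdev->dev, "DMA device registration failed: %d\n", ret);
		return ret;
	}

	return 0;
}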