From 0e18b5d7c6339311f1e32e7b186ae3556c5b6d33 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 29 Sep 2008 13:48:17 +0100
Subject: [PATCH] [ARM] dma: add validation of DMA params

Validate the direction argument like x86 does.  In addition,
validate the dma_unmap_* parameters against those passed to
dma_map_* when using the DMA bounce code.

Signed-off-by: Russell King
---
 arch/arm/common/dmabounce.c        | 11 +++++++----
 arch/arm/include/asm/dma-mapping.h |  8 ++++++++
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 22aec95c9863..f030f0775be7 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -289,6 +289,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	if (buf) {
 		BUG_ON(buf->size != size);
+		BUG_ON(buf->direction != dir);
 
 		dev_dbg(dev,
 			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -334,7 +335,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, ptr, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
+	BUG_ON(!valid_dma_direction(dir));
 
 	return map_single(dev, ptr, size, dir);
 }
@@ -346,7 +347,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
+	BUG_ON(!valid_dma_direction(dir));
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
@@ -365,8 +366,6 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
-
 	unmap_single(dev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(dma_unmap_single);
@@ -383,6 +382,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	BUG_ON(buf->direction != dir);
+
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
@@ -410,6 +411,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	BUG_ON(buf->direction != dir);
+
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1532b7a6079d..2544a087c213 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -277,6 +277,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
@@ -301,6 +303,8 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(page_address(page) + offset, size, dir);
 
@@ -370,6 +374,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -381,6 +387,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-- 
2.20.1
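
Editor's note, not part of the patch: the stricter check works because the
generic valid_dma_direction() helper accepts only the three real transfer
directions, so BUG_ON(!valid_dma_direction(dir)) rejects DMA_NONE as well as
any out-of-range value.  A minimal sketch of that helper, assuming the enum
layout used by include/linux/dma-mapping.h at the time:

	/* Illustrative sketch of the generic helper; not code from this patch. */
	enum dma_data_direction {
		DMA_BIDIRECTIONAL = 0,	/* device both reads and writes the buffer */
		DMA_TO_DEVICE = 1,	/* CPU writes, device reads */
		DMA_FROM_DEVICE = 2,	/* device writes, CPU reads */
		DMA_NONE = 3,		/* no transfer; never valid for a mapping */
	};

	static inline int valid_dma_direction(int dma_direction)
	{
		/* Anything other than the three real directions is a driver bug. */
		return dma_direction == DMA_BIDIRECTIONAL ||
		       dma_direction == DMA_TO_DEVICE ||
		       dma_direction == DMA_FROM_DEVICE;
	}

The dma_unmap_*/dmabounce_sync_* checks, by contrast, compare against the
direction recorded in the dmabounce safe_buffer when the buffer was mapped,
so a driver that unmaps or syncs with a different direction than it mapped
with now trips a BUG_ON instead of silently getting the wrong cache
maintenance.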