*
* See Documentation/DMA-mapping.txt
*/
-dma_addr_t
+static dma_addr_t
sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
		     struct dma_attrs *attrs)
{
#endif
	return SBA_IOVA(ioc, iovp, offset);
}
-EXPORT_SYMBOL(sba_map_single_attrs);
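With the EXPORT_SYMBOL gone and the function made static, drivers can no longer call sba_map_single_attrs() directly; they go through the generic streaming-DMA wrappers, which this series routes to the sba_* routines via the dma_mapping_ops table shown at the end of this excerpt. A minimal, hypothetical driver-side sketch (none of these names are part of the patch):

#include <linux/dma-mapping.h>

/* Illustrative sketch only; `dev', `buf' and `len' stand in for a real
 * driver's device and payload. */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bus))	/* two-argument form, as in this tree */
		return -ENOMEM;

	/* ... hand `bus' to the device and start the transfer ... */
	return 0;
}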
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
*
* See Documentation/DMA-mapping.txt
*/
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    int dir, struct dma_attrs *attrs)
+static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+				   int dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
-EXPORT_SYMBOL(sba_unmap_single_attrs);
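The unmap side is reached the same way once it stops being exported; continuing the hypothetical sketch above, after the device has finished with the buffer:

	/* paired with the dma_map_single() in the earlier sketch */
	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);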
/**
* sba_alloc_coherent - allocate/map shared mem for DMA
*
* See Documentation/DMA-mapping.txt
*/
-void *
+static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
	struct ioc *ioc;
*
* See Documentation/DMA-mapping.txt
*/
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_handle)
{
	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
	free_pages((unsigned long) vaddr, get_order(size));
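Likewise for the coherent pair: consumers call dma_alloc_coherent()/dma_free_coherent(), and on this platform those should end up in sba_alloc_coherent() and the sba_free_coherent() shown above, which simply unmaps the IOVA and frees the pages. A sketch with made-up names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative sketch only; the "ring" buffer and its size are made up. */
static int example_ring(struct device *dev, size_t size)
{
	dma_addr_t ring_bus;
	void *ring;

	ring = dma_alloc_coherent(dev, size, &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... the CPU uses `ring', the device is programmed with `ring_bus' ... */

	dma_free_coherent(dev, size, ring, ring_bus);
	return 0;
}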
*
* See Documentation/DMA-mapping.txt
*/
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			    int nents, int dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	return filled;
}
-EXPORT_SYMBOL(sba_map_sg_attrs);
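Scatter/gather mappings follow the same pattern: dma_map_sg() may coalesce entries (hence the coalesced/filled bookkeeping above) and returns the number of DMA segments to program, which can be smaller than nents. A sketch assuming an already-built scatterlist, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only; `sgl' is assumed to be an initialised scatterlist. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		/* program sg_dma_address(sg) and sg_dma_len(sg) into the device */
	}
	return mapped;
}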
/**
* sba_unmap_sg_attrs - unmap Scatter/Gather list
*
* See Documentation/DMA-mapping.txt
*/
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, int dir, struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
#endif
}
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
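Unmapping mirrors the call but takes the original nents rather than the segment count dma_map_sg() returned; continuing the sketch above:

	/* paired with the dma_map_sg() call in the previous sketch; note the
	 * original nents, not the coalesced count, is passed back */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);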
/**************************************************************
*
	return 1;
}
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
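sba_dma_supported() only insists that the low 32 bits are addressable, since the IOMMU remaps buffers into a window such a device can reach. Drivers exercise it indirectly when setting a DMA mask; a sketch with an arbitrarily chosen 32-bit mask:

	/* dma_set_mask() fails if the platform dma_supported() hook, here
	 * sba_dma_supported(), rejects the mask */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;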
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
__setup("sbapagesize=",sba_page_override);
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
-
struct dma_mapping_ops sba_dma_ops = {
	.alloc_coherent = sba_alloc_coherent,
	.free_coherent = sba_free_coherent,