static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *daddr, gfp_t gfp)
{
- return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *caddr, dma_addr_t daddr)
{
- dma_ops->free_coherent(dev, size, caddr, daddr);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->free_coherent(dev, size, caddr, daddr);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *caddr,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ return ops->map_single_attrs(dev, caddr, size, dir, attrs);
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ return ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
}
static inline void dma_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
}
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
size_t size,
enum dma_data_direction dir)
{
- dma_ops->sync_single_for_cpu(dev, daddr, size, dir);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_single_for_cpu(dev, daddr, size, dir);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
- dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t daddr,
size_t size,
enum dma_data_direction dir)
{
- dma_ops->sync_single_for_device(dev, daddr, size, dir);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_single_for_device(dev, daddr, size, dir);
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl,
int nents,
enum dma_data_direction dir)
{
- dma_ops->sync_sg_for_device(dev, sgl, nents, dir);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_sg_for_device(dev, sgl, nents, dir);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
- return dma_ops->mapping_error(dev, daddr);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ return ops->mapping_error(dev, daddr);
}
#define dma_map_page(dev, pg, off, size, dir) \
dma_map_single(dev, page_address(pg) + (off), (size), (dir))
static inline int dma_supported(struct device *dev, u64 mask)
{
- return dma_ops->dma_supported_op(dev, mask);
+ struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+ return ops->dma_supported_op(dev, mask);
}
#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
- return dma_ops;
-}
-
#endif /* _ASM_IA64_DMA_MAPPING_H */
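For reference, nothing changes for callers of these helpers: the inlines keep their signatures, and only the lookup of the ops table moves from the global dma_ops pointer to platform_dma_get_ops(dev). A minimal, hypothetical driver snippet (example_setup_dma() and its parameters are illustrative, not part of this patch) showing the converted path:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_setup_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* now dispatches via platform_dma_get_ops(dev)->map_single_attrs() */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the device with "handle" ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}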
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
+typedef struct dma_mapping_ops *ia64_mv_dma_get_ops(struct device *);
/*
* WARNING: The legacy I/O space is _architected_. Platforms are
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
# define platform_dma_init ia64_mv.dma_init
+# define platform_dma_get_ops ia64_mv.dma_get_ops
# define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
ia64_mv_dma_init *dma_init;
+ ia64_mv_dma_get_ops *dma_get_ops;
ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq;
ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
platform_global_tlb_purge, \
platform_tlb_migrate_finish, \
platform_dma_init, \
+ platform_dma_get_ops, \
platform_irq_to_vector, \
platform_local_vector_to_irq, \
platform_pci_get_legacy_mem, \
# endif /* CONFIG_IA64_GENERIC */
extern void swiotlb_dma_init(void);
+extern struct dma_mapping_ops *dma_get_ops(struct device *);
/*
* Define default versions so we can extend machvec for new platforms without having
#ifndef platform_dma_init
# define platform_dma_init swiotlb_dma_init
#endif
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops dma_get_ops
+#endif
#ifndef platform_irq_to_vector
# define platform_irq_to_vector __ia64_irq_to_vector
#endif
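The extern declaration of dma_get_ops() above, together with the platform_dma_get_ops default, implies a generic fallback in the arch code for machine vectors that do not provide their own hook. A minimal sketch, assuming the fallback simply hands back the pre-existing global dma_ops pointer and ignores the device argument (the actual definition is not part of this excerpt):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

/* pre-existing global ops pointer, assumed still set up by the platform code */
extern struct dma_mapping_ops *dma_ops;

struct dma_mapping_ops *dma_get_ops(struct device *dev)
{
	return dma_ops;
}
EXPORT_SYMBOL(dma_get_ops);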