#ifndef _ALPHA_DMA_MAPPING_H
#define _ALPHA_DMA_MAPPING_H
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return dma_ops;
}
return mask < 0x00ffffffUL ? 0 : 1;
}
-struct dma_map_ops alpha_noop_ops = {
+const struct dma_map_ops alpha_noop_ops = {
.alloc = alpha_noop_alloc_coherent,
.free = dma_noop_free_coherent,
.map_page = dma_noop_map_page,
.dma_supported = alpha_noop_supported,
};
-struct dma_map_ops *dma_ops = &alpha_noop_ops;
+const struct dma_map_ops *dma_ops = &alpha_noop_ops;
EXPORT_SYMBOL(dma_ops);
return dma_addr == 0;
}
-struct dma_map_ops alpha_pci_ops = {
+const struct dma_map_ops alpha_pci_ops = {
.alloc = alpha_pci_alloc_coherent,
.free = alpha_pci_free_coherent,
.map_page = alpha_pci_map_page,
.dma_supported = alpha_pci_supported,
};
-struct dma_map_ops *dma_ops = &alpha_pci_ops;
+const struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);
#include <plat/dma.h>
#endif
-extern struct dma_map_ops arc_dma_ops;
+extern const struct dma_map_ops arc_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &arc_dma_ops;
}
return dma_mask == DMA_BIT_MASK(32);
}
-struct dma_map_ops arc_dma_ops = {
+const struct dma_map_ops arc_dma_ops = {
.alloc = arc_dma_alloc,
.free = arc_dma_free,
.mmap = arc_dma_mmap,
return arm_dma_ops.set_dma_mask(dev, dma_mask);
}
-static struct dma_map_ops dmabounce_ops = {
+static const struct dma_map_ops dmabounce_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
#define ASMARM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern struct dma_map_ops arm_dma_ops;
-extern struct dma_map_ops arm_coherent_dma_ops;
+extern const struct dma_map_ops arm_dma_ops;
+extern const struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
return __generic_dma_ops(dev);
}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
BUG_ON(!dev);
dev->archdata.dma_ops = ops;
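/*
 * Descriptive note: the table stored here is the per-device override that
 * __generic_dma_ops() above hands back in preference to the global
 * arm_dma_ops, so a const ops table can be installed per device without
 * touching the shared default.
 */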
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-struct dma_map_ops arm_dma_ops = {
+const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
-struct dma_map_ops arm_coherent_dma_ops = {
+const struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
.mmap = arm_coherent_dma_mmap,
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-struct dma_map_ops iommu_ops = {
+const struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
.unmap_resource = arm_iommu_unmap_resource,
};
-struct dma_map_ops iommu_coherent_ops = {
+const struct dma_map_ops iommu_coherent_ops = {
.alloc = arm_coherent_iommu_alloc_attrs,
.free = arm_coherent_iommu_free_attrs,
.mmap = arm_coherent_iommu_mmap_attrs,
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
return coherent ? &iommu_coherent_ops : &iommu_ops;
}
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
-static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
-struct dma_map_ops *xen_dma_ops;
+const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
-static struct dma_map_ops xen_swiotlb_dma_ops = {
+static const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
#define __ASM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops dummy_dma_ops;
+extern const struct dma_map_ops dummy_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &dummy_dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
return 1;
}
-static struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
.mmap = __swiotlb_mmap,
return 0;
}
-struct dma_map_ops dummy_dma_ops = {
+const struct dma_map_ops dummy_dma_ops = {
.alloc = __dummy_alloc,
.free = __dummy_free,
.mmap = __dummy_mmap,
iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
-static struct dma_map_ops iommu_dma_ops = {
+static const struct dma_map_ops iommu_dma_ops = {
.alloc = __iommu_alloc_attrs,
.free = __iommu_free_attrs,
.mmap = __iommu_mmap_attrs,
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
int direction);
-extern struct dma_map_ops avr32_dma_ops;
+extern const struct dma_map_ops avr32_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &avr32_dma_ops;
}
dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}
-struct dma_map_ops avr32_dma_ops = {
+const struct dma_map_ops avr32_dma_ops = {
.alloc = avr32_dma_alloc,
.free = avr32_dma_free,
.map_page = avr32_dma_map_page,
__dma_sync(addr, size, dir);
}
-extern struct dma_map_ops bfin_dma_ops;
+extern const struct dma_map_ops bfin_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &bfin_dma_ops;
}
_dma_sync(handle, size, dir);
}
-struct dma_map_ops bfin_dma_ops = {
+const struct dma_map_ops bfin_dma_ops = {
.alloc = bfin_dma_alloc,
.free = bfin_dma_free,
*/
#define DMA_ERROR_CODE ~0
-extern struct dma_map_ops c6x_dma_ops;
+extern const struct dma_map_ops c6x_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &c6x_dma_ops;
}
}
-struct dma_map_ops c6x_dma_ops = {
+const struct dma_map_ops c6x_dma_ops = {
.alloc = c6x_dma_alloc,
.free = c6x_dma_free,
.map_page = c6x_dma_map_page,
return 1;
}
-struct dma_map_ops v32_dma_ops = {
+const struct dma_map_ops v32_dma_ops = {
.alloc = v32_dma_alloc,
.free = v32_dma_free,
.map_page = v32_dma_map_page,
#define _ASM_CRIS_DMA_MAPPING_H
#ifdef CONFIG_PCI
-extern struct dma_map_ops v32_dma_ops;
+extern const struct dma_map_ops v32_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &v32_dma_ops;
}
#else
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
BUG();
return NULL;
extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;
-extern struct dma_map_ops frv_dma_ops;
+extern const struct dma_map_ops frv_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &frv_dma_ops;
}
return 1;
}
-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
.alloc = frv_dma_alloc,
.free = frv_dma_free,
.map_page = frv_dma_map_page,
return 1;
}
-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
.alloc = frv_dma_alloc,
.free = frv_dma_free,
.map_page = frv_dma_map_page,
#ifndef _H8300_DMA_MAPPING_H
#define _H8300_DMA_MAPPING_H
-extern struct dma_map_ops h8300_dma_map_ops;
+extern const struct dma_map_ops h8300_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &h8300_dma_map_ops;
}
return nents;
}
-struct dma_map_ops h8300_dma_map_ops = {
+const struct dma_map_ops h8300_dma_map_ops = {
.alloc = dma_alloc,
.free = dma_free,
.map_page = map_page,
extern int bad_dma_address;
#define DMA_ERROR_CODE bad_dma_address
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (unlikely(dev == NULL))
return NULL;
#include <linux/module.h>
#include <asm/page.h>
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
int bad_dma_address; /* globals are automatically initialized to zero */
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
-struct dma_map_ops hexagon_dma_ops = {
+const struct dma_map_ops hexagon_dma_ops = {
.alloc = hexagon_dma_alloc_coherent,
.free = hexagon_free_coherent,
.map_sg = hexagon_map_sg,
#include <linux/export.h>
#include <asm/machvec.h>
-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}
-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
if (use_swiotlb(dev))
return &swiotlb_dma_ops;
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);
-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;
static int __init
sba_init(void)
__setup("sbapagesize=",sba_page_override);
-struct dma_map_ops sba_dma_ops = {
+const struct dma_map_ops sba_dma_ops = {
.alloc = sba_alloc_coherent,
.free = sba_free_coherent,
.map_page = sba_map_page,
#define DMA_ERROR_CODE 0
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
typedef u64 ia64_mv_dma_get_required_mask (struct device *);
-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
/*
* WARNING: The legacy I/O space is _architected_. Platforms are
# endif /* CONFIG_IA64_GENERIC */
extern void swiotlb_dma_init(void);
-extern struct dma_map_ops *dma_get_ops(struct device *);
+extern const struct dma_map_ops *dma_get_ops(struct device *);
/*
* Define default versions so we can extend machvec for new platforms without having
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly;
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
}
fs_initcall(dma_init);
-struct dma_map_ops *dma_get_ops(struct device *dev)
+const struct dma_map_ops *dma_get_ops(struct device *dev)
{
return dma_ops;
}
{
dma_ops = &intel_dma_ops;
- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
- dma_ops->sync_single_for_device = machvec_dma_sync_single;
- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
- dma_ops->dma_supported = iommu_dma_supported;
+ intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
+ intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
+ intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
+ intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
+ intel_dma_ops.dma_supported = iommu_dma_supported;
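/*
 * Hedged aside, not part of the original hunk: dma_ops is now a pointer to
 * const, so the old form
 *
 *	dma_ops->dma_supported = iommu_dma_supported;
 *
 * would be rejected at compile time. The hooks are therefore written into
 * intel_dma_ops itself, which is assumed (as the hunk implies) to remain a
 * writable, non-const definition in its own translation unit.
 */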
/*
* The order of these functions is important for
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
.alloc = ia64_swiotlb_alloc_coherent,
.free = ia64_swiotlb_free_coherent,
.map_page = swiotlb_map_page,
* This file is released under the GPLv2
*/
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H
-extern struct dma_map_ops m68k_dma_ops;
+extern const struct dma_map_ops m68k_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &m68k_dma_ops;
}
return nents;
}
-struct dma_map_ops m68k_dma_ops = {
+const struct dma_map_ops m68k_dma_ops = {
.alloc = m68k_dma_alloc,
.free = m68k_dma_free,
.map_page = m68k_dma_map_page,
#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H
-extern struct dma_map_ops metag_dma_ops;
+extern const struct dma_map_ops metag_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &metag_dma_ops;
}
dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
-struct dma_map_ops metag_dma_ops = {
+const struct dma_map_ops metag_dma_ops = {
.alloc = metag_dma_alloc,
.free = metag_dma_free,
.map_page = metag_dma_map_page,
/*
* Available generic sets of operations
*/
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &dma_direct_ops;
}
#endif
}
-struct dma_map_ops dma_direct_ops = {
+const struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
.mmap = dma_direct_mmap_coherent,
}
struct octeon_dma_map_ops {
- struct dma_map_ops dma_map_ops;
+ const struct dma_map_ops dma_map_ops;
dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};
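/*
 * Hedged note: with the embedded dma_map_ops member now const, instances of
 * struct octeon_dma_map_ops have to be populated by their initializers;
 * individual .dma_map_ops callbacks can no longer be assigned after the fact.
 */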
},
};
-struct dma_map_ops *octeon_pci_dma_map_ops;
+const struct dma_map_ops *octeon_pci_dma_map_ops;
void __init octeon_pci_dma_init(void)
{
struct dev_archdata {
/* DMA operations on that device */
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMA_PERDEV_COHERENT
/* Non-zero if DMA is coherent with CPU caches */
#include <dma-coherence.h>
#endif
-extern struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops *mips_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
struct dma_map_ops;
-extern struct dma_map_ops *octeon_pci_dma_map_ops;
+extern const struct dma_map_ops *octeon_pci_dma_map_ops;
extern char *octeon_swiotlb;
#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
extern char nlm_reset_entry[], nlm_reset_entry_end[];
/* SWIOTLB */
-extern struct dma_map_ops nlm_swiotlb_dma_ops;
+extern const struct dma_map_ops nlm_swiotlb_dma_ops;
extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
return daddr;
}
-static struct dma_map_ops loongson_dma_map_ops = {
+static const struct dma_map_ops loongson_dma_map_ops = {
.alloc = loongson_dma_alloc_coherent,
.free = loongson_dma_free_coherent,
.map_page = loongson_dma_map_page,
EXPORT_SYMBOL(dma_cache_sync);
-static struct dma_map_ops mips_default_dma_map_ops = {
+static const struct dma_map_ops mips_default_dma_map_ops = {
.alloc = mips_dma_alloc_coherent,
.free = mips_dma_free_coherent,
.mmap = mips_dma_mmap,
.dma_supported = mips_dma_supported
};
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-struct dma_map_ops nlm_swiotlb_dma_ops = {
+const struct dma_map_ops nlm_swiotlb_dma_ops = {
.alloc = nlm_dma_alloc_coherent,
.free = nlm_dma_free_coherent,
.map_page = swiotlb_map_page,
#include <asm/cache.h>
#include <asm/io.h>
-extern struct dma_map_ops mn10300_dma_ops;
+extern const struct dma_map_ops mn10300_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &mn10300_dma_ops;
}
return 1;
}
-struct dma_map_ops mn10300_dma_ops = {
+const struct dma_map_ops mn10300_dma_ops = {
.alloc = mn10300_dma_alloc,
.free = mn10300_dma_free,
.map_page = mn10300_dma_map_page,
#ifndef _ASM_NIOS2_DMA_MAPPING_H
#define _ASM_NIOS2_DMA_MAPPING_H
-extern struct dma_map_ops nios2_dma_ops;
+extern const struct dma_map_ops nios2_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &nios2_dma_ops;
}
}
-struct dma_map_ops nios2_dma_ops = {
+const struct dma_map_ops nios2_dma_ops = {
.alloc = nios2_dma_alloc,
.free = nios2_dma_free,
.map_page = nios2_dma_map_page,
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern struct dma_map_ops or1k_dma_map_ops;
+extern const struct dma_map_ops or1k_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &or1k_dma_map_ops;
}
mtspr(SPR_DCBFR, cl);
}
-struct dma_map_ops or1k_dma_map_ops = {
+const struct dma_map_ops or1k_dma_map_ops = {
.alloc = or1k_dma_alloc,
.free = or1k_dma_free,
.map_page = or1k_map_page,
*/
#ifdef CONFIG_PA11
-extern struct dma_map_ops pcxl_dma_ops;
-extern struct dma_map_ops pcx_dma_ops;
+extern const struct dma_map_ops pcxl_dma_ops;
+extern const struct dma_map_ops pcx_dma_ops;
#endif
-extern struct dma_map_ops *hppa_dma_ops;
+extern const struct dma_map_ops *hppa_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return hppa_dma_ops;
}
#include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */
-struct dma_map_ops *hppa_dma_ops __read_mostly;
+const struct dma_map_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
-struct dma_map_ops pcxl_dma_ops = {
+const struct dma_map_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc = pa11_dma_alloc,
.free = pa11_dma_free,
return;
}
-struct dma_map_ops pcx_dma_ops = {
+const struct dma_map_ops pcx_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc = pcx_dma_alloc,
.free = pcx_dma_free,
*/
struct dev_archdata {
/* DMA operations on that device */
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
/*
* These two used to be a union. However, with the hybrid ops we need
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
/* We don't handle the NULL dev case for ISA for now. We could
* do it via an out of line call but it is not needed for now. The
return dev->archdata.dma_ops;
}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
}
}
#ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
-extern struct dma_map_ops *get_pci_dma_ops(void);
+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
+extern const struct dma_map_ops *get_pci_dma_ops(void);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
#include <linux/swiotlb.h>
-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;
static inline void dma_mark_clean(void *addr, size_t size) {}
* map_page, and unmap_page on highmem, use normal dma_ops
* for everything else.
*/
-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_direct_alloc_coherent,
.free = __dma_direct_free_coherent,
.mmap = dma_direct_mmap_coherent,
}
#endif
-struct dma_map_ops dma_direct_ops = {
+const struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
.mmap = dma_direct_mmap_coherent,
int __dma_set_mask(struct device *dev, u64 dma_mask)
{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask);
u64 __dma_get_required_mask(struct device *dev)
{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (unlikely(dma_ops == NULL))
return 0;
EXPORT_SYMBOL(isa_mem_base);
-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
pci_dma_ops = dma_ops;
}
-struct dma_map_ops *get_pci_dma_ops(void)
+const struct dma_map_ops *get_pci_dma_ops(void)
{
return pci_dma_ops;
}
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
-static struct dma_map_ops dma_iommu_fixed_ops = {
+static const struct dma_map_ops dma_iommu_fixed_ops = {
.alloc = dma_fixed_alloc_coherent,
.free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
static u64 cell_dma_get_required_mask(struct device *dev)
{
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
if (!dev->dma_mask)
return 0;
return 0;
}
-static struct dma_map_ops dma_npu_ops = {
+static const struct dma_map_ops dma_npu_ops = {
.map_page = dma_npu_map_page,
.map_sg = dma_npu_map_sg,
.alloc = dma_npu_alloc,
return DMA_BIT_MASK(32);
}
-static struct dma_map_ops ps3_sb_dma_ops = {
+static const struct dma_map_ops ps3_sb_dma_ops = {
.alloc = ps3_alloc_coherent,
.free = ps3_free_coherent,
.map_sg = ps3_sb_map_sg,
.unmap_page = ps3_unmap_page,
};
-static struct dma_map_ops ps3_ioc0_dma_ops = {
+static const struct dma_map_ops ps3_ioc0_dma_ops = {
.alloc = ps3_alloc_coherent,
.free = ps3_free_coherent,
.map_sg = ps3_ioc0_map_sg,
return DMA_BIT_MASK(64);
}
-static struct dma_map_ops ibmebus_dma_ops = {
+static const struct dma_map_ops ibmebus_dma_ops = {
.alloc = ibmebus_alloc_coherent,
.free = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg,
return dma_iommu_ops.get_required_mask(dev);
}
-static struct dma_map_ops vio_dma_mapping_ops = {
+static const struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
.mmap = dma_direct_mmap_coherent,
* This file is released under the GPLv2
*/
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-extern struct dma_map_ops s390_pci_dma_ops;
+extern const struct dma_map_ops s390_pci_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
}
fs_initcall(dma_debug_do_init);
-struct dma_map_ops s390_pci_dma_ops = {
+const struct dma_map_ops s390_pci_dma_ops = {
.alloc = s390_dma_alloc,
.free = s390_dma_free,
.map_sg = s390_dma_map_sg,
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
extern void no_iommu_init(void);
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return dma_ops;
}
}
#endif
-struct dma_map_ops nommu_dma_ops = {
+const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent,
.map_page = nommu_map_page,
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static int __init dma_init(void)
*/
}
-extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops *leon_dma_ops;
-extern struct dma_map_ops pci32_dma_ops;
+extern const struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *leon_dma_ops;
+extern const struct dma_map_ops pci32_dma_ops;
extern struct bus_type pci_bus_type;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static struct dma_map_ops sun4u_dma_ops = {
+static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};
-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
int dma_supported(struct device *dev, u64 device_mask)
BUG();
}
-static struct dma_map_ops sbus_dma_ops = {
+static const struct dma_map_ops sbus_dma_ops = {
.alloc = sbus_alloc_coherent,
.free = sbus_free_coherent,
.map_page = sbus_map_page,
}
}
-struct dma_map_ops pci32_dma_ops = {
+const struct dma_map_ops pci32_dma_ops = {
.alloc = pci32_alloc_coherent,
.free = pci32_free_coherent,
.map_page = pci32_map_page,
EXPORT_SYMBOL(pci32_dma_ops);
/* leon re-uses pci32_dma_ops */
-struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
EXPORT_SYMBOL(leon_dma_ops);
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
local_irq_restore(flags);
}
-static struct dma_map_ops sun4v_dma_ops = {
+static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
.map_page = dma_4v_map_page,
struct dev_archdata {
/* DMA operations on that device */
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
/* Offset of the DMA address from the PA. */
dma_addr_t dma_offset;
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif
-extern struct dma_map_ops *tile_dma_map_ops;
-extern struct dma_map_ops *gx_pci_dma_map_ops;
-extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+extern const struct dma_map_ops *tile_dma_map_ops;
+extern const struct dma_map_ops *gx_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
static inline void dma_mark_clean(void *addr, size_t size) {}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
}
return 1;
}
-static struct dma_map_ops tile_default_dma_map_ops = {
+static const struct dma_map_ops tile_default_dma_map_ops = {
.alloc = tile_dma_alloc_coherent,
.free = tile_dma_free_coherent,
.map_page = tile_dma_map_page,
.dma_supported = tile_dma_supported
};
-struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
EXPORT_SYMBOL(tile_dma_map_ops);
/* Generic PCI DMA mapping functions */
return 1;
}
-static struct dma_map_ops tile_pci_default_dma_map_ops = {
+static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.alloc = tile_pci_dma_alloc_coherent,
.free = tile_pci_dma_free_coherent,
.map_page = tile_pci_dma_map_page,
.dma_supported = tile_pci_dma_supported
};
-struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
EXPORT_SYMBOL(gx_pci_dma_map_ops);
/* PCI DMA mapping functions for legacy PCI devices */
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
-static struct dma_map_ops pci_swiotlb_dma_ops = {
+static const struct dma_map_ops pci_swiotlb_dma_ops = {
.alloc = tile_swiotlb_alloc_coherent,
.free = tile_swiotlb_free_coherent,
.map_page = swiotlb_map_page,
.mapping_error = swiotlb_dma_mapping_error,
};
-static struct dma_map_ops pci_hybrid_dma_ops = {
+static const struct dma_map_ops pci_hybrid_dma_ops = {
.alloc = tile_swiotlb_alloc_coherent,
.free = tile_swiotlb_free_coherent,
.map_page = tile_pci_dma_map_page,
.dma_supported = tile_pci_dma_supported
};
-struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
-struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
int dma_set_mask(struct device *dev, u64 mask)
{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
/*
* For PCI devices with 64-bit DMA addressing capability, promote
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
/*
* For PCI devices with 64-bit DMA addressing capability, promote
#include <asm/memory.h>
#include <asm/cacheflush.h>
-extern struct dma_map_ops swiotlb_dma_map_ops;
+extern const struct dma_map_ops swiotlb_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &swiotlb_dma_map_ops;
}
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
-struct dma_map_ops swiotlb_dma_map_ops = {
+const struct dma_map_ops swiotlb_dma_map_ops = {
.alloc = unicore_swiotlb_alloc_coherent,
.free = unicore_swiotlb_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
struct dev_archdata {
#ifdef CONFIG_X86_DEV_DMA_OPS
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#endif
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
struct dma_domain {
struct list_head node;
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
int domain_nr;
};
void add_dma_domain(struct dma_domain *domain);
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifndef CONFIG_X86_DEV_DMA_OPS
return dma_ops;
#ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H
-extern struct dma_map_ops nommu_dma_ops;
+extern const struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
return -1;
}
-static struct dma_map_ops gart_dma_ops = {
+static const struct dma_map_ops gart_dma_ops = {
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
.map_page = gart_map_page,
free_pages((unsigned long)vaddr, get_order(size));
}
-static struct dma_map_ops calgary_dma_ops = {
+static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent,
.free = calgary_free_coherent,
.map_sg = calgary_map_sg,
static int forbid_dac __read_mostly;
-struct dma_map_ops *dma_ops = &nommu_dma_ops;
+const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
int dma_supported(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
flush_write_buffers();
}
-struct dma_map_ops nommu_dma_ops = {
+const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
-static struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
.alloc = x86_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent,
}
/* We have our own dma_ops: the same as swiotlb but from alloc (above) */
-static struct dma_map_ops sta2x11_dma_ops = {
+static const struct dma_map_ops sta2x11_dma_ops = {
.alloc = sta2x11_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent,
.map_page = swiotlb_map_page,
int xen_swiotlb __read_mostly;
-static struct dma_map_ops xen_swiotlb_dma_ops = {
+static const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
struct dev_archdata {
/* DMA operations on that device */
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern struct dma_map_ops xtensa_dma_map_ops;
+extern const struct dma_map_ops xtensa_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return 0;
}
-struct dma_map_ops xtensa_dma_map_ops = {
+const struct dma_map_ops xtensa_dma_map_ops = {
.alloc = xtensa_dma_alloc,
.free = xtensa_dma_free,
.map_page = xtensa_map_page,
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
-static struct dma_map_ops amd_iommu_dma_ops;
+static const struct dma_map_ops amd_iommu_dma_ops;
/*
* This struct contains device specific data for the IOMMU
return check_device(dev);
}
-static struct dma_map_ops amd_iommu_dma_ops = {
+static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent,
.free = free_coherent,
.map_page = map_page,
}
struct mbus_device *
-mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct mbus_hw_ops *hw_ops, int index,
void __iomem *mmio_va)
{
}
struct scif_hw_dev *
-scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
struct mic_mw *mmio, struct mic_mw *aper, void *dp,
void __iomem *rdp, struct dma_chan **chan, int num_chan,
void scif_unregister_driver(struct scif_driver *driver);
struct scif_hw_dev *
scif_register_device(struct device *pdev, int id,
- struct dma_map_ops *dma_ops,
+ const struct dma_map_ops *dma_ops,
struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
struct mic_mw *mmio, struct mic_mw *aper,
void *dp, void __iomem *rdp,
vdev->dev.parent = pdev;
vdev->id.device = id;
vdev->id.vendor = VOP_DEV_ANY_ID;
- vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops;
+ vdev->dev.archdata.dma_ops = dma_ops;
vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
vdev->dev.release = vop_release_dev;
dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
}
-static struct dma_map_ops __mic_dma_ops = {
+static const struct dma_map_ops __mic_dma_ops = {
.alloc = __mic_dma_alloc,
.free = __mic_dma_free,
.map_page = __mic_dma_map_page,
mic_unmap_single(mdev, dma_addr, size);
}
-static struct dma_map_ops mic_dma_ops = {
+static const struct dma_map_ops mic_dma_ops = {
.map_page = mic_dma_map_page,
.unmap_page = mic_dma_unmap_page,
};
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
-static struct dma_map_ops ccio_ops = {
+static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
.alloc = ccio_alloc,
.free = ccio_free,
}
-static struct dma_map_ops sba_ops = {
+static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc = sba_alloc,
.free = sba_free,
return &vmd->dev->dev;
}
-static struct dma_map_ops *vmd_dma_ops(struct device *dev)
+static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
return get_dma_ops(to_vmd_dev(dev));
}
int is_phys;
};
-extern struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_noop_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
* dma dependent code. Code that depends on the dma-mapping
* API needs to set 'depends on HAS_DMA' in its Kconfig
*/
-extern struct dma_map_ops bad_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+extern const struct dma_map_ops bad_dma_ops;
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &bad_dma_ops;
}
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(ptr, size);
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
struct scatterlist *s;
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size);
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_resource)
size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_cpu)
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_device)
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
void *cpu_addr, dma_addr_t dma_handle,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
WARN_ON(irqs_disabled());
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
if (!ops)
return 0;
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
if (ops->set_dma_mask)
return ops->set_dma_mask(dev, mask);
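/*
 * Hedged aside: the generic wrappers above only ever call through the const
 * pointer returned by get_dma_ops(), which this change leaves untouched.
 * What stops compiling is patching the shared table in place, e.g.
 *
 *	get_dma_ops(dev)->set_dma_mask = my_set_dma_mask;
 *
 * (my_set_dma_mask being a hypothetical hook). Such code has to install its
 * own ops table instead, e.g. via set_dma_ops() on architectures that
 * provide it.
 */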
};
struct mbus_device *
-mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct mbus_hw_ops *hw_ops, int index,
void __iomem *mmio_va);
void mbus_unregister_device(struct mbus_device *mbdev);
return PARAVIRT_LAZY_NONE;
}
-extern struct dma_map_ops *xen_dma_ops;
+extern const struct dma_map_ops *xen_dma_ops;
#ifdef CONFIG_XEN
void __init xen_early_init(void);
return 1;
}
-struct dma_map_ops dma_noop_ops = {
+const struct dma_map_ops dma_noop_ops = {
.alloc = dma_noop_alloc,
.free = dma_noop_free,
.map_page = dma_noop_map_page,