MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
P: Michael Kerrisk
M: mtk.manpages@gmail.com
--W: http://www.kernel.org/doc/man-pages
--S: Supported
++W: http://www.kernel.org/doc/man-pages
+++L: linux-man@vger.kernel.org
++S: Supported
MARVELL LIBERTAS WIRELESS DRIVER
P: Dan Williams
struct protection_domain *domain;
u16 devid;
phys_addr_t paddr;
+++ u64 dma_mask = dev->coherent_dma_mask;
+++
+++ if (!check_device(dev))
+++ return NULL;
+
+++ if (!get_device_resources(dev, &iommu, &domain, &devid))
+++ flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
++
+++ flag |= __GFP_ZERO;
virt_addr = (void *)__get_free_pages(flag, get_order(size));
if (!virt_addr)
return 0;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
--- static int need_flush; /* global flush state. set for each gart wrap */
+++ static bool need_flush; /* global flush state. set for each gart wrap */
-- static unsigned long alloc_iommu(struct device *dev, int size)
++ static unsigned long alloc_iommu(struct device *dev, int size,
++ unsigned long align_mask)
{
unsigned long offset, flags;
unsigned long boundary_size;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-- size, base_index, boundary_size, 0);
++ size, base_index, boundary_size, align_mask);
if (offset == -1) {
--- need_flush = 1;
+++ need_flush = true;
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-- size, base_index, boundary_size, 0);
++ size, base_index, boundary_size,
++ align_mask);
}
if (offset != -1) {
next_bit = offset+size;
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
--- static dma_addr_t
--- gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
--- {
- dma_addr_t map;
- unsigned long align_mask;
-
- align_mask = (1UL << get_order(size)) - 1;
- map = dma_map_area(dev, paddr, size, dir, align_mask);
-- dma_addr_t map = dma_map_area(dev, paddr, size, dir);
---
--- flush_gart();
---
--- return map;
--- }
---
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
---#ifndef _ASM_X86_AMD_IOMMU_H
---#define _ASM_X86_AMD_IOMMU_H
+++#ifndef ASM_X86__AMD_IOMMU_H
+++#define ASM_X86__AMD_IOMMU_H
++
+++ #include <linux/irqreturn.h>
+
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
return dma_ops;
else
return dev->archdata.dma_ops;
----#endif
++++#endif /* ASM_X86__DMA_MAPPING_H */
}
/* Make sure we keep the same behaviour */
return boot_cpu_data.x86_clflush_size;
}
--- #define dma_is_consistent(d, h) (1)
+++ /*
+++  * Return the DMA mask to honour for a coherent allocation on @dev.
+++  * Falls back to a zone-derived default when the device has not set
+++  * coherent_dma_mask: 24-bit for GFP_DMA requests, 32-bit otherwise.
+++  */
+++ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+++ gfp_t gfp)
+++ {
+++ unsigned long dma_mask = 0;
++
- #include <asm-generic/dma-coherent.h>
- #endif /* ASM_X86__DMA_MAPPING_H */
+++ dma_mask = dev->coherent_dma_mask;
+++ if (!dma_mask)
+++ dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+++
+++ return dma_mask;
+++ }
+++
+++ /*
+++  * Adjust @gfp for a coherent allocation on @dev.  On x86-64, if the
+++  * device's coherent mask cannot reach above 4GB and the caller did not
+++  * already request the 24-bit DMA zone, restrict the allocation to the
+++  * 32-bit DMA32 zone; on 32-bit builds @gfp is returned unchanged.
+++  */
+++ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+++ {
+++ #ifdef CONFIG_X86_64
+++ unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+++
+++ if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+++ gfp |= GFP_DMA32;
+++ #endif
+++ return gfp;
+++ }
+++
+++ /*
+++  * Allocate a coherent DMA buffer for @dev.  Tries the device's private
+++  * coherent memory pool first, then delegates to the registered
+++  * dma_mapping_ops with zone flags recomputed from the device mask.
+++  * Returns the kernel virtual address, with the bus address stored in
+++  * *@dma_handle, or NULL on failure.
+++  */
+++ static inline void *
+++ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+++ gfp_t gfp)
+++ {
+++ struct dma_mapping_ops *ops = get_dma_ops(dev);
+++ void *memory;
+++
+++ /* Caller-supplied zone flags are dropped; the correct zone is
+++  * re-derived below via dma_alloc_coherent_gfp_flags(). */
+++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+++
+++ /* Per-device coherent pool (asm-generic/dma-coherent) takes priority. */
+++ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+++ return memory;
+++
+++ /* No device: use the x86 fallback device and the ISA DMA zone. */
+++ if (!dev) {
+++ dev = &x86_dma_fallback_dev;
+++ gfp |= GFP_DMA;
+++ }
+++
+++ if (!is_device_dma_capable(dev))
+++ return NULL;
+++
+++ if (!ops->alloc_coherent)
+++ return NULL;
+++
+++ return ops->alloc_coherent(dev, size, dma_handle,
+++ dma_alloc_coherent_gfp_flags(dev, gfp));
+++ }
+++
+++ /*
+++  * Free a buffer obtained from dma_alloc_coherent().  @vaddr/@bus must
+++  * be the values returned by the matching allocation.  Must not be
+++  * called with interrupts disabled (portability requirement of the
+++  * DMA API, hence the WARN_ON).
+++  */
+++ static inline void dma_free_coherent(struct device *dev, size_t size,
+++ void *vaddr, dma_addr_t bus)
+++ {
+++ struct dma_mapping_ops *ops = get_dma_ops(dev);
+++
+++ WARN_ON(irqs_disabled()); /* for portability */
+++
+++ /* Buffers from the per-device coherent pool are released there. */
+++ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+++ return;
+++
+++ if (ops->free_coherent)
+++ ops->free_coherent(dev, size, vaddr, bus);
+++ }
+
-- #include <asm-generic/dma-coherent.h>
+ #endif