switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
- dma_cache_inv(vaddr, size);
+ invalidate_dcache_region(vaddr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
- dma_cache_wback(vaddr, size);
+ clean_dcache_region(vaddr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- dma_cache_wback_inv(vaddr, size);
+ flush_dcache_region(vaddr, size);
break;
default:
BUG();
void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
-EXPORT_SYMBOL(_dma_cache_wback);
-EXPORT_SYMBOL(_dma_cache_inv);
#endif /* CONFIG_DMA_NONCOHERENT */
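With _dma_cache_wback and _dma_cache_inv no longer exported, modules cannot call the cache helpers directly; the portable replacement is the DMA API. A minimal driver-side sketch, assuming a hypothetical struct my_dev and RING_BYTES (the dma_alloc_noncoherent()/dma_cache_sync() calls are the real kernel interface this patch migrates callers to):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Sketch only: my_dev, ring_dma and RING_BYTES are made up for illustration. */
static int my_dev_setup_ring(struct device *dev, struct my_dev *md)
{
	md->ring = dma_alloc_noncoherent(dev, RING_BYTES, &md->ring_dma,
					 GFP_KERNEL);
	if (!md->ring)
		return -ENOMEM;

	memset(md->ring, 0, RING_BYTES);
	/* The CPU wrote the ring through its cache; make the zeroed
	 * descriptors visible to the device before handing it over. */
	dma_cache_sync(dev, md->ring, RING_BYTES, DMA_BIDIRECTIONAL);
	return 0;
}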
buf = P2SEGADDR(buf);
/* Flush the dcache before we hand off the buffer */
- dma_cache_wback_inv((void *)buf, size);
+ __flush_purge_region((void *)buf, size);
return (void *)buf;
}
/*
* We must flush the cache before we pass it on to the device
*/
- dma_cache_wback_inv(ret, size);
+ __flush_purge_region(ret, size);
page = virt_to_page(ret);
free = page + (size >> PAGE_SHIFT);
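Both hunks above encode the same rule: the CPU has just initialized the buffer through a writeback data cache, so the dirty lines must be purged to RAM before the buffer is handed to the device or accessed through an uncached alias. A condensed, hypothetical version of the sh pattern (P2SEGADDR() yields the uncached P2 alias of a cached P1 address; __flush_purge_region() is the writeback-and-invalidate helper used throughout this patch):

#include <linux/mm.h>		/* __get_free_pages(), get_order() */
#include <linux/string.h>
#include <asm/addrspace.h>	/* P2SEGADDR() */
#include <asm/cacheflush.h>	/* __flush_purge_region() */

/* Hypothetical condensed allocator; the real code also fixes up struct
 * page refcounts and frees the unused tail pages, as the hunk above shows. */
static void *alloc_for_device(size_t size)
{
	void *buf = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));

	if (!buf)
		return NULL;
	memset(buf, 0, size);			/* dirties cache lines    */
	__flush_purge_region(buf, size);	/* writeback + invalidate */
	return (void *)P2SEGADDR((unsigned long)buf);	/* uncached alias */
}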
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
- dma_cache_inv(p1addr, size);
+ __flush_invalidate_region(p1addr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
- dma_cache_wback(p1addr, size);
+ __flush_wback_region(p1addr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- dma_cache_wback_inv(p1addr, size);
+ __flush_purge_region(p1addr, size);
break;
default:
BUG();
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/io.h>
if (vp != NULL) {
memset(vp, 0, size);
*dma_handle = virt_to_phys(ret);
- dma_cache_wback_inv((unsigned long)ret, size);
+ dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
}
return vp;
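Note the two aliases at work in this hunk: ret (fed to virt_to_phys() and the cache sync) is the cached kernel-direct address of the pages, while vp, the pointer actually returned, is presumably an uncached remapping of the same physical range set up earlier in the function (not shown in this hunk). The maintenance operation must target the cached alias, because that is where dirty lines can live; accesses through the uncached alias bypass the cache entirely.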
#endif
#define RTC_ALWAYS_BCD 0
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
/*
* Some mucking forons use if[n]def writeq to check if platform has it.
* It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
#define ioport_map(port, nr) ioremap(port, nr)
#define ioport_unmap(port) iounmap(port)
-#define dma_cache_wback_inv(_start, _size) \
- flush_dcache_region(_start, _size)
-#define dma_cache_inv(_start, _size) \
- invalidate_dcache_region(_start, _size)
-#define dma_cache_wback(_start, _size) \
- clean_dcache_region(_start, _size)
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
#define ioport_map(port, nr) ((void __iomem*)(port))
#define ioport_unmap(addr)
-#define dma_cache_inv(_start,_size) do { blkfin_inv_cache_all();} while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { blkfin_inv_cache_all();} while (0)
-
/* Pages to physical address... */
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
extern void iounmap(void *addr);
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
/* H8/300 internal I/O functions */
static __inline__ unsigned char ctrl_inb(unsigned long addr)
{
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
# endif /* __KERNEL__ */
/*
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
-
-/* m68k caches aren't DMA coherent */
-extern void dma_cache_wback_inv(unsigned long start, unsigned long size);
-extern void dma_cache_wback(unsigned long start, unsigned long size);
-extern void dma_cache_inv(unsigned long start, unsigned long size);
-
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
__builtin_memset((void __force *) addr, val, count);
extern void iounmap(void *addr);
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
/* Pages to physical address... */
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
* caches. Dirty lines of the caches may be written back or simply
* be discarded. This operation is necessary before dma operations
* to the memory.
+ *
+ * This API used to be exported; it is now for arch-internal use only.
*/
#ifdef CONFIG_DMA_NONCOHERENT
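On MIPS the three operations survive as the arch-internal hooks declared earlier (_dma_cache_inv and friends) behind this #ifdef. A minimal sketch of how the DMA code dispatches on direction, patterned after what arch/mips/mm/dma-default.c does (treat the exact form as illustrative, not a quotation of that file):

#include <linux/kernel.h>	/* BUG() */
#include <linux/dma-mapping.h>	/* enum dma_data_direction */
#include <asm/io.h>		/* arch-internal dma_cache_* macros */

static void __dma_sync(unsigned long addr, size_t size,
		       enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:	/* device reads: push dirty lines out */
		dma_cache_wback(addr, size);
		break;
	case DMA_FROM_DEVICE:	/* device writes: drop stale lines */
		dma_cache_inv(addr, size);
		break;
	case DMA_BIDIRECTIONAL:	/* both: write back, then drop */
		dma_cache_wback_inv(addr, size);
		break;
	default:
		BUG();
	}
}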
/* IO Port space is : BBiiii where BB is HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff
-
-#define dma_cache_inv(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
-#define dma_cache_wback(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
-#define dma_cache_wback_inv(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
-
/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
* bit mode and from 0xfffffffff0000000-0xfffffffffffffff in 64 bit
* mode (essentially just sign extending. This macro takes in a 32
#define writeq writeq
#endif
-#ifdef CONFIG_NOT_COHERENT_CACHE
-
-#define dma_cache_inv(_start,_size) \
- invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
- clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
- flush_dcache_range(_start, (_start + _size))
-
-#else /* CONFIG_NOT_COHERENT_CACHE */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-#endif /* !CONFIG_NOT_COHERENT_CACHE */
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
#include <asm/mpc8260_pci9.h>
#endif
-#ifdef CONFIG_NOT_COHERENT_CACHE
-
-#define dma_cache_inv(_start,_size) \
- invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
- clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
- flush_dcache_range(_start, (_start + _size))
-
-#else
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-#endif
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
}
#endif
- dma_cache_wback_inv(addr, size);
+ __flush_purge_region(addr, size);
/* actual, physical DMA */
doing_pdma = 0;
#define iounmap(addr) \
__iounmap((addr))
-/*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software. There are three types of operations that
- * can be applied to dma buffers.
- *
- * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- * writing the content of the caches back to memory, if necessary.
- * The function also invalidates the affected part of the caches as
- * necessary before DMA transfers from outside to memory.
- * - dma_cache_inv(start, size) invalidates the affected parts of the
- * caches. Dirty lines of the caches may be written back or simply
- * be discarded. This operation is necessary before dma operations
- * to the memory.
- * - dma_cache_wback(start, size) writes back any dirty lines but does
- * not invalidate the cache. This can be used before DMA reads from
- * memory,
- */
-
-#define dma_cache_wback_inv(_start,_size) \
- __flush_purge_region(_start,_size)
-#define dma_cache_inv(_start,_size) \
- __flush_invalidate_region(_start,_size)
-#define dma_cache_wback(_start,_size) \
- __flush_wback_region(_start,_size)
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
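One consequence of the deleted comment's warning that dirty lines "may be written back or simply be discarded" during invalidation: a DMA buffer must not share a cache line with unrelated CPU-owned data, since either outcome corrupts one side. A hypothetical illustration:

#include <linux/types.h>

/* Hypothetical bad layout: DMA target and CPU data in one cache line. */
struct bad_layout {
	u8  dma_buf[24];	/* device fills this (DMA_FROM_DEVICE) */
	u32 counter;		/* CPU keeps updating this             */
};

/*
 * If dma_buf and counter share a line, invalidating for dma_buf can
 * throw away the CPU's in-cache update of counter, while writing the
 * dirty line back can overwrite data the device already stored in
 * dma_buf.  This is why DMA-able allocations are padded out to at
 * least the cache line size on non-coherent platforms.
 */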
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
- dma_cache_wback_inv((unsigned long)vaddr, size);
+	unsigned long start = (unsigned long) vaddr;
+	unsigned long s = start & L1_CACHE_ALIGN_MASK;
+	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+ for (; s <= e; s += L1_CACHE_BYTES)
+ asm volatile ("ocbp %0, 0" : : "r" (s));
}
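ocbp is the SH-5 operand-cache block purge instruction: it writes a dirty line back to memory and then invalidates it, i.e. precisely the old wback_inv operation. Purging unconditionally, regardless of dir, is therefore a safe if slightly conservative implementation of dma_cache_sync() (for DMA_TO_DEVICE a plain writeback would suffice). The invalidate-only (ocbi) and writeback-only (ocbwb) variants can be seen in the helpers deleted from asm-sh64/io.h below.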
static inline dma_addr_t dma_map_single(struct device *dev,
unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
extern void onchip_unmap(unsigned long vaddr);
-/*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software. There are three types of operations that
- * can be applied to dma buffers.
- *
- * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- * writing the content of the caches back to memory, if necessary.
- * The function also invalidates the affected part of the caches as
- * necessary before DMA transfers from outside to memory.
- * - dma_cache_inv(start, size) invalidates the affected parts of the
- * caches. Dirty lines of the caches may be written back or simply
- * be discarded. This operation is necessary before dma operations
- * to the memory.
- * - dma_cache_wback(start, size) writes back any dirty lines but does
- * not invalidate the cache. This can be used before DMA reads from
- * memory,
- */
-
-static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
-{
- unsigned long s = start & L1_CACHE_ALIGN_MASK;
- unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
- for (; s <= e; s += L1_CACHE_BYTES)
- asm volatile ("ocbp %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
-{
- // Note that caller has to be careful with overzealous
- // invalidation should there be partial cache lines at the extremities
- // of the specified range
- unsigned long s = start & L1_CACHE_ALIGN_MASK;
- unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
- for (; s <= e; s += L1_CACHE_BYTES)
- asm volatile ("ocbi %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
-{
- unsigned long s = start & L1_CACHE_ALIGN_MASK;
- unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
- for (; s <= e; s += L1_CACHE_BYTES)
- asm volatile ("ocbwb %0, 0" : : "r" (s));
-}
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
#define RTC_PORT(x) (rtc_port + (x))
#define RTC_ALWAYS_BCD 0
-/* Nothing to do */
-/* P3: Only IDE DMA may need these. XXX Verify that it still does... */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
#endif
#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
#define sbus_iounmap(__addr, __size) \
release_region((unsigned long)(__addr), (__size))
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
}
-#define dma_cache_inv(_start,_size) flush_write_buffers()
-#define dma_cache_wback(_start,_size) flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
#else
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
+#define flush_write_buffers() do { } while (0)
#endif
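On i386 the removed macros never touched the caches, which are DMA-coherent on x86; they merely expanded to flush_write_buffers(), whose "lock; addl $0,0(%%esp)" is the classic locked-operation idiom for draining the store buffers of the few processors that need it. Nothing should be lost by their removal: as far as I can tell, the architecture's DMA sync hooks already issue the same flush where required.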
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
#define flush_write_buffers()
extern int iommu_bio_merge;