The dmac_inv_range() and dmac_clean_range() interfaces are now unused, and so can be removed.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
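
For context, a minimal sketch (not part of this patch; the helper names are made up) of how callers are now expected to drive cache maintenance for streaming DMA: everything goes through the direction-aware dmac_map_area()/dmac_unmap_area() hooks shown in the header hunk below, and the per-CPU assembly decides internally whether to clean, invalidate or flush, so the standalone range exports become dead interface.

	static void example_cpu_to_dev(const void *kaddr, size_t size, int dir)
	{
		/* make the buffer visible to the device before the transfer */
		dmac_map_area(kaddr, size, dir);
	}

	static void example_dev_to_cpu(const void *kaddr, size_t size, int dir)
	{
		/* make the device's writes visible to the CPU after the transfer */
		dmac_unmap_area(kaddr, size, dir);
	}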
* DMA Cache Coherency
* ===================
*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- * - start - virtual start address
- * - end - virtual end address
- *
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- * - start - virtual start address
- * - end - virtual end address
- *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
- void (*dma_inv_range)(const void *, const void *);
- void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
};
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
-#define dmac_inv_range cpu_cache.dma_inv_range
-#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
*/
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
-#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
#endif
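
Illustrative only, not part of the patch: how the two remaining dispatch schemes above are assumed to resolve. On a multi-cache build dmac_flush_range() indirects through the cpu_cache function-pointer table; on a single-cache build __glue() pastes the CPU name at compile time so the call binds directly to the per-CPU entry point (for example v7_dma_flush_range when _CACHE is v7).

	static inline void example_flush(const void *start, const void *end)
	{
		/* multi-cache:  cpu_cache.dma_flush_range(start, end)
		 * single-cache: __glue(_CACHE,_dma_flush_range), e.g.
		 *               v7_dma_flush_range(start, end)
		 */
		dmac_flush_range(start, end);
	}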
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_dma_inv_range)
+fa_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_dma_clean_range)
+fa_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
.long fa_flush_kern_dcache_area
.long fa_dma_map_area
.long fa_dma_unmap_area
- .long fa_dma_inv_range
- .long fa_dma_clean_range
.long fa_dma_flush_range
.size fa_cache_fns, . - fa_cache_fns
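
The fa_dma_inv_range/fa_dma_clean_range labels above stay in the file as local symbols because the (unchanged, not shown in this hunk) fa_dma_map_area entry point is assumed to still branch to them. A hedged C rendering of that dispatch, using hypothetical stand-in names since the real labels are assembly-local:

	/* stand-ins for the local assembly labels, illustration only */
	static void hyp_inv_range(const void *start, const void *end) { }
	static void hyp_clean_range(const void *start, const void *end) { }

	static void example_map_area(const void *start, size_t size, int dir)
	{
		const void *end = (const char *)start + size;

		if (dir == DMA_FROM_DEVICE)
			hyp_inv_range(start, end);	/* device will write: discard stale lines */
		else
			hyp_clean_range(start, end);	/* device will read: write back dirty lines */
	}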
ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
-/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_inv_range)
- /* FALLTHROUGH */
-
/*
* dma_flush_range(start, end)
*
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- /* FALLTHROUGH */
-
-/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_clean_range)
mov pc, lr
/*
*/
ENTRY(v3_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v3_dma_inv_range
+ bne v3_dma_flush_range
/* FALLTHROUGH */
/*
.long v3_flush_kern_dcache_area
.long v3_dma_map_area
.long v3_dma_unmap_area
- .long v3_dma_inv_range
- .long v3_dma_clean_range
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
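
A hedged C rendering (illustrative only; example_v3_unmap_area() is a made-up name) of the unmap decision in v3_dma_unmap_area above: if the buffer was mapped for anything other than DMA_TO_DEVICE, the CPU's cached copy must be discarded before it reads what the device wrote, and with the separate invalidate routine gone the code now branches to v3_dma_flush_range.

	static void example_v3_unmap_area(const void *start, size_t size, int dir)
	{
		if (dir != DMA_TO_DEVICE)
			dmac_flush_range(start, (const char *)start + size);
		/* DMA_TO_DEVICE: nothing to do, the CPU's copy is still valid */
	}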
ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
-/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4_dma_inv_range)
- /* FALLTHROUGH */
-
/*
* dma_flush_range(start, end)
*
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
- /* FALLTHROUGH */
-
-/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4_dma_clean_range)
mov pc, lr
/*
*/
ENTRY(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v4_dma_inv_range
+ bne v4_dma_flush_range
/* FALLTHROUGH */
/*
.long v4_flush_kern_dcache_area
.long v4_dma_map_area
.long v4_dma_unmap_area
- .long v4_dma_inv_range
- .long v4_dma_clean_range
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
.long v4wb_flush_kern_dcache_area
.long v4wb_dma_map_area
.long v4wb_dma_unmap_area
- .long v4wb_dma_inv_range
- .long v4wb_dma_clean_range
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_dma_inv_range)
+v4wt_dma_inv_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- /* FALLTHROUGH */
-
-/*
- * dma_clean_range(start, end)
- *
- * Clean the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
.long v4wt_flush_kern_dcache_area
.long v4wt_dma_map_area
.long v4wt_dma_unmap_area
- .long v4wt_dma_inv_range
- .long v4wt_dma_clean_range
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_inv_range)
+v6_dma_inv_range:
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_clean_range)
+v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
.long v6_flush_kern_dcache_area
.long v6_dma_map_area
.long v6_dma_unmap_area
- .long v6_dma_inv_range
- .long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_inv_range)
+v7_dma_inv_range:
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_clean_range)
+v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
.long v7_flush_kern_dcache_area
.long v7_dma_map_area
.long v7_dma_unmap_area
- .long v7_dma_inv_range
- .long v7_dma_clean_range
.long v7_dma_flush_range
.size v7_cache_fns, . - v7_cache_fns
*
* (same as v4wb)
*/
-ENTRY(arm1020_dma_inv_range)
+arm1020_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
*
* (same as v4wb)
*/
-ENTRY(arm1020_dma_clean_range)
+arm1020_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
.long arm1020_flush_kern_dcache_area
.long arm1020_dma_map_area
.long arm1020_dma_unmap_area
- .long arm1020_dma_inv_range
- .long arm1020_dma_clean_range
.long arm1020_dma_flush_range
.align 5
*
* (same as v4wb)
*/
-ENTRY(arm1020e_dma_inv_range)
+arm1020e_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
*
* (same as v4wb)
*/
-ENTRY(arm1020e_dma_clean_range)
+arm1020e_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
.long arm1020e_flush_kern_dcache_area
.long arm1020e_dma_map_area
.long arm1020e_dma_unmap_area
- .long arm1020e_dma_inv_range
- .long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
.align 5
*
* (same as v4wb)
*/
-ENTRY(arm1022_dma_inv_range)
+arm1022_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
*
* (same as v4wb)
*/
-ENTRY(arm1022_dma_clean_range)
+arm1022_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
.long arm1022_flush_kern_dcache_area
.long arm1022_dma_map_area
.long arm1022_dma_unmap_area
- .long arm1022_dma_inv_range
- .long arm1022_dma_clean_range
.long arm1022_dma_flush_range
.align 5
*
* (same as v4wb)
*/
-ENTRY(arm1026_dma_inv_range)
+arm1026_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
*
* (same as v4wb)
*/
-ENTRY(arm1026_dma_clean_range)
+arm1026_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
.long arm1026_flush_kern_dcache_area
.long arm1026_dma_map_area
.long arm1026_dma_unmap_area
- .long arm1026_dma_inv_range
- .long arm1026_dma_clean_range
.long arm1026_dma_flush_range
.align 5
*
* (same as v4wb)
*/
-ENTRY(arm920_dma_inv_range)
+arm920_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
*
* (same as v4wb)
*/
-ENTRY(arm920_dma_clean_range)
+arm920_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
.long arm920_flush_kern_dcache_area
.long arm920_dma_map_area
.long arm920_dma_unmap_area
- .long arm920_dma_inv_range
- .long arm920_dma_clean_range
.long arm920_dma_flush_range
#endif
*
* (same as v4wb)
*/
-ENTRY(arm922_dma_inv_range)
+arm922_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
*
* (same as v4wb)
*/
-ENTRY(arm922_dma_clean_range)
+arm922_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
.long arm922_flush_kern_dcache_area
.long arm922_dma_map_area
.long arm922_dma_unmap_area
- .long arm922_dma_inv_range
- .long arm922_dma_clean_range
.long arm922_dma_flush_range
#endif
*
* (same as v4wb)
*/
-ENTRY(arm925_dma_inv_range)
+arm925_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
*
* (same as v4wb)
*/
-ENTRY(arm925_dma_clean_range)
+arm925_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.long arm925_flush_kern_dcache_area
.long arm925_dma_map_area
.long arm925_dma_unmap_area
- .long arm925_dma_inv_range
- .long arm925_dma_clean_range
.long arm925_dma_flush_range
ENTRY(cpu_arm925_dcache_clean_area)
*
* (same as v4wb)
*/
-ENTRY(arm926_dma_inv_range)
+arm926_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
*
* (same as v4wb)
*/
-ENTRY(arm926_dma_clean_range)
+arm926_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.long arm926_flush_kern_dcache_area
.long arm926_dma_map_area
.long arm926_dma_unmap_area
- .long arm926_dma_inv_range
- .long arm926_dma_clean_range
.long arm926_dma_flush_range
ENTRY(cpu_arm926_dcache_clean_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_dma_inv_range)
+arm940_dma_inv_range:
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_dma_clean_range)
+arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
.long arm940_flush_kern_dcache_area
.long arm940_dma_map_area
.long arm940_dma_unmap_area
- .long arm940_dma_inv_range
- .long arm940_dma_clean_range
.long arm940_dma_flush_range
__INIT
* - end - virtual end address
* (same as arm926)
*/
-ENTRY(arm946_dma_inv_range)
+arm946_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
*
* (same as arm926)
*/
-ENTRY(arm946_dma_clean_range)
+arm946_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.long arm946_flush_kern_dcache_area
.long arm946_dma_map_area
.long arm946_dma_unmap_area
- .long arm946_dma_inv_range
- .long arm946_dma_clean_range
.long arm946_dma_flush_range
* (same as v4wb)
*/
.align 5
-ENTRY(feroceon_dma_inv_range)
+feroceon_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
mov pc, lr
.align 5
-ENTRY(feroceon_range_dma_inv_range)
+feroceon_range_dma_inv_range:
mrs r2, cpsr
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
* (same as v4wb)
*/
.align 5
-ENTRY(feroceon_dma_clean_range)
+feroceon_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
mov pc, lr
.align 5
-ENTRY(feroceon_range_dma_clean_range)
+feroceon_range_dma_clean_range:
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
.long feroceon_flush_kern_dcache_area
.long feroceon_dma_map_area
.long feroceon_dma_unmap_area
- .long feroceon_dma_inv_range
- .long feroceon_dma_clean_range
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
.long feroceon_range_flush_kern_dcache_area
.long feroceon_range_dma_map_area
.long feroceon_dma_unmap_area
- .long feroceon_range_dma_inv_range
- .long feroceon_range_dma_clean_range
.long feroceon_range_dma_flush_range
.align 5
*
* (same as v4wb)
*/
-ENTRY(mohawk_dma_inv_range)
+mohawk_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
*
* (same as v4wb)
*/
-ENTRY(mohawk_dma_clean_range)
+mohawk_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
.long mohawk_flush_kern_dcache_area
.long mohawk_dma_map_area
.long mohawk_dma_unmap_area
- .long mohawk_dma_inv_range
- .long mohawk_dma_clean_range
.long mohawk_dma_flush_range
ENTRY(cpu_mohawk_dcache_clean_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xsc3_dma_inv_range)
+xsc3_dma_inv_range:
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xsc3_dma_clean_range)
+xsc3_dma_clean_range:
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
.long xsc3_flush_kern_dcache_area
.long xsc3_dma_map_area
.long xsc3_dma_unmap_area
- .long xsc3_dma_inv_range
- .long xsc3_dma_clean_range
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_dma_inv_range)
+xscale_dma_inv_range:
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_dma_clean_range)
+xscale_dma_clean_range:
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
.long xscale_flush_kern_dcache_area
.long xscale_dma_map_area
.long xscale_dma_unmap_area
- .long xscale_dma_inv_range
- .long xscale_dma_clean_range
.long xscale_dma_flush_range
/*
.long xscale_dma_a0_map_area
.long xscale_dma_unmap_area
.long xscale_dma_flush_range
- .long xscale_dma_clean_range
- .long xscale_dma_flush_range
ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry