From: Ralf Baechle
Date: Sun, 11 Jan 2009 18:44:49 +0000 (+0000)
Subject: MIPS: Avoid destructive invalidation on partial cachelines.
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=a8ca8b64e3fdfec17679cba0ca5ce6e3ffed092d;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

MIPS: Avoid destructive invalidation on partial cachelines.

See discussion e9c3a7c20901051031y528d0d31r18d44c5096c59e0@mail.gmail.com.

Signed-off-by: Ralf Baechle
---

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6e99665ae860..c43f4b26a690 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
-		else
+		else {
+			unsigned long lsize = cpu_scache_line_size();
+			unsigned long almask = ~(lsize - 1);
+
+			/*
+			 * There is no clearly documented alignment requirement
+			 * for the cache instruction on MIPS processors and
+			 * some processors, among them the RM5200 and RM7000
+			 * QED processors will throw an address error for cache
+			 * hit ops with insufficient alignment. Solved by
+			 * aligning the address to cache line size.
+			 */
+			cache_op(Hit_Writeback_Inv_SD, addr & almask);
+			cache_op(Hit_Writeback_Inv_SD,
+				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
+		}
 		return;
 	}
 
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		unsigned long lsize = cpu_dcache_line_size();
+		unsigned long almask = ~(lsize - 1);
+
 		R4600_HIT_CACHEOP_WAR_IMPL;
+		cache_op(Hit_Writeback_Inv_D, addr & almask);
+		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
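
For reference, the sketch below illustrates the cache-line alignment math used in the hunk above. It is a minimal, stand-alone user-space demonstration, not kernel code: line_size, range_start and range_len are made-up stand-ins for cpu_dcache_line_size(), addr and size, and no cache ops are actually issued. It only shows how almask is derived and which lines of a DMA range are partially covered and would therefore need a Hit_Writeback_Inv rather than a plain invalidate.

	/*
	 * Sketch of the alignment math in r4k_dma_cache_inv() above.
	 * Assumes a 32-byte cache line and an example, misaligned range;
	 * all values are hypothetical and chosen only for illustration.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long line_size = 32;               /* stand-in for cpu_dcache_line_size() */
		unsigned long almask = ~(line_size - 1);    /* clears the offset-within-line bits */

		unsigned long range_start = 0x1004;         /* deliberately not line-aligned */
		unsigned long range_len = 0x70;
		unsigned long range_end = range_start + range_len;

		unsigned long first_line = range_start & almask;       /* line holding the first byte */
		unsigned long last_line = (range_end - 1) & almask;    /* line holding the last byte  */

		printf("range:      0x%lx - 0x%lx\n", range_start, range_end);
		printf("first line: 0x%lx%s\n", first_line,
		       first_line != range_start ? " (partial: writeback+invalidate)" : "");
		printf("last line:  0x%lx%s\n", last_line,
		       (range_end & ~almask) ? " (partial: writeback+invalidate)" : "");

		return 0;
	}

The design point mirrors the patch: bytes that share the first or last cache line with the DMA buffer but lie outside it would be silently discarded by a pure invalidate of those lines, so the patch writes back and invalidates the two boundary lines before invalidating the rest of the range.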