From: Vineet Gupta
Date: Fri, 29 Aug 2014 05:25:15 +0000 (+0530)
Subject: ARCv2: mm: Implement cache region flush operations
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=0d77117fc5c0333d024a183d6790167bb90c3b62;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

ARCv2: mm: Implement cache region flush operations

These are more efficient than the per-line ops: the whole region is
described to hardware with one write each to the START and END aux
registers, instead of looping with one aux register write per cache line.

Signed-off-by: Vineet Gupta
---

diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 5008021fba98..16e457706129 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -62,6 +62,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_BCR 0x77 /* Build Config reg */
 #define ARC_REG_IC_IVIC 0x10
 #define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIR 0x16
+#define ARC_REG_IC_ENDR 0x17
 #define ARC_REG_IC_IVIL 0x19
 #define ARC_REG_IC_PTAG 0x1E
 #define ARC_REG_IC_PTAG_HI 0x1F
@@ -76,6 +78,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_IVDL 0x4A
 #define ARC_REG_DC_FLSH 0x4B
 #define ARC_REG_DC_FLDL 0x4C
+#define ARC_REG_DC_STARTR 0x4D
+#define ARC_REG_DC_ENDR 0x4E
 
 #define ARC_REG_DC_PTAG 0x5C
 #define ARC_REG_DC_PTAG_HI 0x5F
@@ -83,6 +87,8 @@ extern unsigned long perip_base, perip_end;
 #define DC_CTRL_DIS 0x001
 #define DC_CTRL_INV_MODE_FLUSH 0x040
 #define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_RGN_OP_INV 0x200
+#define DC_CTRL_RGN_OP_MSK 0xE00
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG 0x901
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 18132eb56150..8401fcb75d19 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -21,6 +21,10 @@
 #include
 #include
 
+#ifdef CONFIG_ISA_ARCV2
+#define USE_RGN_FLSH 1
+#endif
+
 static int l2_line_sz;
 static int ioc_exists;
 int slc_enable = 1, ioc_enable = 1;
@@ -332,6 +336,8 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 	}
 }
 
+#ifndef USE_RGN_FLSH
+
 /*
  * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
  * Here's how cache ops are implemented
@@ -394,6 +400,68 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
 	}
 }
 
+#else
+
+/*
+ * optimized flush operation which takes a region as opposed to iterating per line
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+			  unsigned long sz, const int op, const int full_page)
+{
+	const unsigned int ctl = ARC_REG_DC_CTRL;
+	unsigned int s, e, val;
+
+	/* Only for Non aliasing I-cache in HS38 */
+	if (op == OP_INV_IC) {
+		s = ARC_REG_IC_IVIR;
+		e = ARC_REG_IC_ENDR;
+	} else {
+		s = ARC_REG_DC_STARTR;
+		e = ARC_REG_DC_ENDR;
+	}
+
+	if (!full_page) {
+		/* for any leading gap between @paddr and start of cache line */
+		sz += paddr & ~CACHE_LINE_MASK;
+		paddr &= CACHE_LINE_MASK;
+
+		/*
+		 * account for any trailing gap to end of cache line
+		 * this is equivalent to DIV_ROUND_UP() in line ops above
+		 */
+		sz += L1_CACHE_BYTES - 1;
+	}
+
+	if (is_pae40_enabled()) {
+		/* TBD: check if crossing 4TB boundary */
+		if (op == OP_INV_IC)
+			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+		else
+			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+	}
+
+	/*
+	 * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
+	 * Flush-n-invalidate additionally uses setting DC_CTRL.IM = 1
+	 * just as for line ops which is handled in __before_dc_op()
+	 */
+	val = read_aux_reg(ctl) & ~DC_CTRL_RGN_OP_MSK;
+
+	if (op & OP_INV)
+		val |= DC_CTRL_RGN_OP_INV;
+
+	write_aux_reg(ctl, val);
+
+	/* ENDR needs to be set ahead of START */
+	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
+	write_aux_reg(s, paddr);
+
+	/* caller waits on DC_CTRL.FS */
+}
+
+#endif
+
 #if (CONFIG_ARC_MMU_VER < 3)
 #define __cache_line_loop __cache_line_loop_v2
 #elif (CONFIG_ARC_MMU_VER == 3)
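
For reference, a minimal sketch of the D-cache region-op sequence the patch
programs is shown below. It is illustrative only and not part of the patch:
it assumes the ARC_REG_DC_*/DC_CTRL_* definitions added to asm/cache.h above,
the ARC read_aux_reg()/write_aux_reg() accessors and OP_* flags already used
by cache.c, and the function name dc_region_op_sketch() is made up for the
example. The closing poll stands in for the "caller waits on DC_CTRL.FS"
step noted in the patch.

static void dc_region_op_sketch(phys_addr_t start, unsigned long sz, int op)
{
	unsigned int ctrl;

	/* select flush vs invalidate in the DC_CTRL RGN_OP field */
	ctrl = read_aux_reg(ARC_REG_DC_CTRL) & ~DC_CTRL_RGN_OP_MSK;
	if (op & OP_INV)
		ctrl |= DC_CTRL_RGN_OP_INV;
	write_aux_reg(ARC_REG_DC_CTRL, ctrl);

	/* ENDR (exclusive) must be programmed before STARTR kicks off the op */
	write_aux_reg(ARC_REG_DC_ENDR, start + sz);
	write_aux_reg(ARC_REG_DC_STARTR, start);

	/* flush variants: busy-wait until hardware clears DC_CTRL.FS */
	if (op & OP_FLUSH)
		while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
			;
}

As with the per-line loop, a flush-n-invalidate would additionally need
DC_CTRL.IM set beforehand, which the patch leaves to __before_dc_op().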