From dbb459d93ca1611811a8c6095231746494b7eae6 Mon Sep 17 00:00:00 2001
From: Park Bumgyu
Date: Thu, 13 Apr 2017 14:46:47 +0900
Subject: [PATCH] Revert "arm64: kill flush_cache_all()"

This reverts commit 68234df4ea7939f98431aa81113fbdce10c4a84b.

Signed-off-by: Park Bumgyu
Change-Id: If0b171a071e9e2fe55f0f5dc73d8f963b594ca28
---
 arch/arm64/include/asm/cacheflush.h  |  5 ++
 arch/arm64/include/asm/proc-fns.h    |  4 ++
 arch/arm64/include/asm/system_misc.h |  1 +
 arch/arm64/kernel/process.c          | 12 ++++-
 arch/arm64/mm/cache.S                | 73 ++++++++++++++++++++++++++++
 arch/arm64/mm/flush.c                |  1 +
 arch/arm64/mm/proc.S                 | 46 ++++++++++++++++++
 7 files changed, 141 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 76d1cc85d5b1..7c4a6aac7d68 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,6 +40,10 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT or ASID-tagged VIVT I-cache.
  *
+ *	flush_cache_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -65,6 +69,7 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
+extern void flush_cache_all(void);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 16cef2e8449e..9da52c2c6c5c 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,8 +28,12 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
+extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+		      unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 07aa8e3c5630..989879d195e5 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -43,6 +43,7 @@
 struct mm_struct;
 extern void show_pte(unsigned long addr);
 extern void __show_regs(struct pt_regs *);
+void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
 #define show_unhandled_signals_ratelimited()				\
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6260752e1b4e..c624baac289e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -66,6 +66,14 @@ unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
+void soft_restart(unsigned long addr)
+{
+	setup_mm_for_reboot();
+	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
+	/* Should never get here */
+	BUG();
+}
+
 /*
  * Function pointers to optional machine specific functions
  */
@@ -138,7 +146,9 @@ void machine_power_off(void)
 
 /*
  * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
  * provide a HW restart implementation, to ensure that all CPUs reset at once.
  * This is required so that any code running after reset on the primary CPU
  * doesn't have to co-ordinate with other CPUs to ensure they aren't still
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 91464e7f77cc..0a3221c266ed 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -25,6 +25,79 @@
 #include 
 #include 
 
+/*
+ *	__flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: x0-x7, x9-x11
+ */
+__flush_dcache_all:
+	dmb	sy			// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1		// read clidr
+	and	x3, x0, #0x7000000	// extract loc from clidr
+	lsr	x3, x3, #23		// left align loc bit field
+	cbz	x3, finished		// if loc is 0, then no need to clean
+	mov	x10, #0			// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1	// work out 3x current cache level
+	lsr	x1, x0, x2		// extract cache type bits from clidr
+	and	x1, x1, #7		// mask of the bits for current cache only
+	cmp	x1, #2			// see what cache we have at this level
+	b.lt	skip			// skip if no cache, or just i-cache
+	save_and_disable_irqs x9	// make CSSELR and CCSIDR access atomic
+	msr	csselr_el1, x10		// select current cache level in csselr
+	isb				// isb to sync the new cssr&csidr
+	mrs	x1, ccsidr_el1		// read the new ccsidr
+	restore_irqs x9
+	and	x2, x1, #7		// extract the length of the cache lines
+	add	x2, x2, #4		// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3	// find maximum number on the way size
+	clz	w5, w4			// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13	// extract max number of the index size
+loop2:
+	mov	x9, x4			// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6		// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6		// factor index number into x11
+	dc	cisw, x11		// clean & invalidate by set/way
+	subs	x9, x9, #1		// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1		// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2		// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0			// switch back to cache level 0
+	msr	csselr_el1, x10		// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ *	flush_cache_all()
+ *
+ *	Flush the entire cache system.  The data cache flush is now achieved
+ *	using atomic clean / invalidates working outwards from L1 cache. This
+ *	is done using Set/Way based cache maintenance instructions.  The
+ *	instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+	mov	x12, lr
+	bl	__flush_dcache_all
+	mov	x0, #0
+	ic	ialluis			// I+BTB cache invalidate
+	ret	x12
+ENDPROC(flush_cache_all)
+
 /*
  *	flush_icache_range(start,end)
  *
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index e36ed5087b5c..3ede71cddd09 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
+EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 139320a7f7a2..8509d55628e0 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -43,6 +43,52 @@
 
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
+/*
+ *	cpu_cache_off()
+ *
+ *	Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+	mrs	x0, sctlr_el1
+	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	msr	sctlr_el1, x0
+	isb
+	ret
+ENDPROC(cpu_cache_off)
+
+/*
+ *	cpu_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the same state
+ *	as it would be if it had been reset, and branch to what would be the
+ *	reset vector. It must be executed with the flat identity mapping.
+ *
+ *	- loc   - location to jump to for soft reset
+ */
+	.align	5
+ENTRY(cpu_reset)
+	mrs	x1, sctlr_el1
+	bic	x1, x1, #1
+	msr	sctlr_el1, x1		// disable the MMU
+	isb
+	ret	x0
+ENDPROC(cpu_reset)
+
+ENTRY(cpu_soft_restart)
+	/* Save address of cpu_reset() and reset address */
+	mov	x19, x0
+	mov	x20, x1
+
+	/* Turn D-cache off */
+	bl	cpu_cache_off
+
+	/* Push out all dirty data, and ensure cache is empty */
+	bl	flush_cache_all
+
+	mov	x0, x20
+	ret	x19
+ENDPROC(cpu_soft_restart)
+
 /*
  *	cpu_do_idle()
  *
-- 
2.20.1
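
For reference, the call path this revert restores is: soft_restart() ->
setup_mm_for_reboot() -> cpu_soft_restart() -> cpu_cache_off() ->
flush_cache_all() -> cpu_reset(), which disables the MMU and branches to
the supplied physical address. A minimal usage sketch follows; the handler
name and entry address are hypothetical and are not part of this patch --
only soft_restart() and arm_pm_restart come from the code above.

#include <linux/reboot.h>
#include <asm/system_misc.h>

/* Hypothetical reset entry point -- illustrative value only. */
#define MY_RESET_ENTRY		0x80000000UL

/*
 * On a single-CPU platform, a restart handler could branch back to the
 * boot entry via soft_restart(), which flushes the caches, turns off the
 * MMU and D-cache, and jumps to the given physical address (never returns).
 */
static void my_soc_restart(enum reboot_mode mode, const char *cmd)
{
	soft_restart(MY_RESET_ENTRY);
}

/* Wired up during platform init, e.g.: arm_pm_restart = my_soc_restart; */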