ARM: mm: use inner-shareable barriers for TLB and user cache operations
authorWill Deacon <will.deacon@arm.com>
Mon, 13 May 2013 11:01:12 +0000 (12:01 +0100)
committerWill Deacon <will.deacon@arm.com>
Mon, 12 Aug 2013 11:25:45 +0000 (12:25 +0100)
System-wide barriers aren't required for situations where we only need
to make visibility and ordering guarantees in the inner-shareable domain
(i.e. we are not dealing with devices or potentially incoherent CPUs).

This patch changes the v7 TLB operations, coherent_user_range and
dcache_clean_area functions to use inner-shareable barriers. For cache
maintenance, only the store access type is required to ensure completion.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm/mm/cache-v7.S
arch/arm/mm/proc-v7.S
arch/arm/mm/tlb-v7.S

index 515b00064da8f66db5400c3990905f7ad89e2113..b5c467a65c271a8c8538defc2880b0689067d046 100644 (file)
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
        add     r12, r12, r2
        cmp     r12, r1
        blo     1b
-       dsb
+       dsb     ishst
        icache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 6)  @ invalidate BTB Inner Shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 6)  @ invalidate BTB
-       dsb
+       dsb     ishst
        isb
        mov     pc, lr
 
index 73398bcf9bd8ea8d7293803828cb837306742fb7..0b5462a941a6c30a58f0927c19c9e5c0f3879174 100644 (file)
@@ -83,7 +83,7 @@ ENTRY(cpu_v7_dcache_clean_area)
        add     r0, r0, r2
        subs    r1, r1, r2
        bhi     2b
-       dsb
+       dsb     ishst
        mov     pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
index ea94765acf9a3650f5cb850f6990893f3ccb459b..355308767bae69bf29044d4c85ef30ec3a03b56b 100644 (file)
@@ -35,7 +35,7 @@
 ENTRY(v7wbi_flush_user_tlb_range)
        vma_vm_mm r3, r2                        @ get vma->vm_mm
        mmid    r3, r3                          @ get vm_mm->context.id
-       dsb
+       dsb     ish
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        asid    r3, r3                          @ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       dsb
+       dsb     ish
        mov     pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
 
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
  *     - end   - end address (exclusive, may not be aligned)
  */
 ENTRY(v7wbi_flush_kern_tlb_range)
-       dsb
+       dsb     ish
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        mov     r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       dsb
+       dsb     ish
        isb
        mov     pc, lr
 ENDPROC(v7wbi_flush_kern_tlb_range)