powerpc/mm: Convert slb_finish_load[_1T] to local symbols
author	Michael Ellerman <mpe@ellerman.id.au>
Mon, 13 Feb 2017 04:26:40 +0000 (15:26 +1100)
committer	Michael Ellerman <mpe@ellerman.id.au>
Thu, 16 Feb 2017 23:58:51 +0000 (10:58 +1100)
slb_finish_load and slb_finish_load_1T are both only used within
slb_low.S, so make them local symbols.

This makes the code a little clearer, as it's more obvious that neither is
intended to be an entry point from arbitrary other code, only from the call
sites in this file.

It also prevents them from being used with kprobes and other tracing tools,
which is good because we are not able to safely take traps at these
locations, so making them local symbols avoids having to blacklist them.
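
For context (not part of this patch): in the GNU assembler, a label whose
name begins with ".L" is assembler-local and is never emitted into the
object's symbol table, so it cannot show up in kallsyms or be targeted by
kprobes. A minimal sketch with hypothetical label names:

	.text
plain_label:			/* emitted into the symbol table; visible to nm and kallsyms */
	blr
.Llocal_label:			/* ".L" prefix: assembler-local, dropped from the symbol table */
	blr
	b	.Llocal_label	/* in-file branches to the local label still assemble as before */

Assuming a built object, the effect can be checked with something like
"nm arch/powerpc/mm/slb_low.o | grep slb_finish", which lists both symbols
before this patch and nothing after it.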

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/slb_low.S

index e2974fcd20f17ba4811232a4cb77a69f2a9dbde8..9beed92c1900bb208ffed821c7d3d8f0b344e628 100644
@@ -71,9 +71,9 @@ slb_miss_kernel_load_linear:
 
 
 BEGIN_FTR_SECTION
-       b       slb_finish_load
+       b       .Lslb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
-       b       slb_finish_load_1T
+       b       .Lslb_finish_load_1T
 
 1:
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -109,9 +109,9 @@ slb_miss_kernel_load_io:
        addi    r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
 
 BEGIN_FTR_SECTION
-       b       slb_finish_load
+       b       .Lslb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
-       b       slb_finish_load_1T
+       b       .Lslb_finish_load_1T
 
 0:     /*
         * For userspace addresses, make sure this is region 0.
@@ -174,9 +174,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
        ld      r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
        cmpldi  r10,0x1000
-       bge     slb_finish_load_1T
+       bge     .Lslb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       b       slb_finish_load
+       b       .Lslb_finish_load
 
 8:     /* invalid EA - return an error indication */
        crset   4*cr0+eq                /* indicate failure */
@@ -187,7 +187,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
  *
  * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
-slb_finish_load:
+.Lslb_finish_load:
        rldimi  r10,r9,ESID_BITS,0
        ASM_VSID_SCRAMBLE(r10,r9,256M)
        /*
@@ -256,7 +256,7 @@ slb_compare_rr_to_size:
  *
  * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
  */
-slb_finish_load_1T:
+.Lslb_finish_load_1T:
        srdi    r10,r10,(SID_SHIFT_1T - SID_SHIFT)      /* get 1T ESID */
        rldimi  r10,r9,ESID_BITS_1T,0
        ASM_VSID_SCRAMBLE(r10,r9,1T)