arm64: use ENDPIPROC() to annotate position independent assembler routines
author		Ard Biesheuvel <ard.biesheuvel@linaro.org>
		Thu, 8 Oct 2015 19:02:03 +0000 (20:02 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
		Mon, 12 Oct 2015 15:19:45 +0000 (16:19 +0100)
For more control over which functions are called with the MMU off or
with the UEFI 1:1 mapping active, annotate some assembler routines as
position independent. This is done by introducing ENDPIPROC(), which
replaces the ENDPROC() declaration of those routines.
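
For reference, ENDPIPROC(x) (added to assembler.h below) emits a
__pi_-prefixed alias for the function; ENDPIPROC(memcpy), for example,
expands to:

	.globl	__pi_memcpy
	.type	__pi_memcpy, %function
	.set	__pi_memcpy, memcpy	// __pi_memcpy aliases memcpy
	.size	__pi_memcpy, . - memcpy
	ENDPROC(memcpy)			// usual end-of-function annotation

Callers that must run before the kernel virtual mapping is active can
then link against the __pi_ symbol to make that requirement explicit.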

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/assembler.h
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S
arch/arm64/lib/memset.S
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/mm/cache.S

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index b51f2cc22ca99731f2fa5efdced173f0a50b6c92..12eff928ef8b38dd18ae3bd157b12eb918f797a6 100644
@@ -193,4 +193,15 @@ lr .req    x30             // link register
        str     \src, [\tmp, :lo12:\sym]
        .endm
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define ENDPIPROC(x)                   \
+       .globl  __pi_##x;               \
+       .type   __pi_##x, %function;    \
+       .set    __pi_##x, x;            \
+       .size   __pi_##x, . - x;        \
+       ENDPROC(x)
+
 #endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 8636b7549163a20078734f26ad984e54a804d1c3..4444c1d25f4bb7217f540715e8cde1b27d28913d 100644
@@ -41,4 +41,4 @@ ENTRY(memchr)
        ret
 2:     mov     x0, #0
        ret
-ENDPROC(memchr)
+ENDPIPROC(memchr)
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index 6ea0776ba6de1014c049c740fc041e30f33a5b2c..ffbdec00327d0463ca0bd608b7612e471a08347d 100644
@@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 )
 .Lret0:
        mov     result, #0
        ret
-ENDPROC(memcmp)
+ENDPIPROC(memcmp)
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 173a1aace9bbb97951f96e27c26f245ff7f0396b..36a6a62cf2633147cc87b0e44da78dd5468947bb 100644
@@ -71,4 +71,4 @@
 ENTRY(memcpy)
 #include "copy_template.S"
        ret
-ENDPROC(memcpy)
+ENDPIPROC(memcpy)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index 57b19ea2dad467d885f09991b902d0a52bd6f747..68e2f2035e232d4ddb6969c198a94cc74ed17e21 100644
@@ -194,4 +194,4 @@ ENTRY(memmove)
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
-ENDPROC(memmove)
+ENDPIPROC(memmove)
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index 7c72dfd36b6396a921b7d2b7d66e5880f8314d72..29f405f08792ba29f74ed3387a3b49ac7b61c5d2 100644
@@ -213,4 +213,4 @@ ENTRY(memset)
        ands    count, count, zva_bits_x
        b.ne    .Ltail_maybe_long
        ret
-ENDPROC(memset)
+ENDPIPROC(memset)
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 42f828b06c59a4daab35562e03aa70516a5d864a..471fe61760ef661213007542df37c7c8fdcb1a18 100644
@@ -231,4 +231,4 @@ CPU_BE(     orr     syndrome, diff, has_nul )
        lsr     data1, data1, #56
        sub     result, data1, data2, lsr #56
        ret
-ENDPROC(strcmp)
+ENDPIPROC(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 987b68b9ce4474bb9cfd44e77ef61d3a871d40f8..55ccc8e24c08440399034d41bf8aa04699e09812 100644
@@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
        csinv   data1, data1, xzr, le
        csel    data2, data2, data2a, le
        b       .Lrealigned
-ENDPROC(strlen)
+ENDPIPROC(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 0224cf5a55334a297c1cc079f08644fc09707156..e267044761c6f2c1b4cadcba729e8d0dbe79f766 100644
@@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul )
 .Lret0:
        mov     result, #0
        ret
-ENDPROC(strncmp)
+ENDPIPROC(strncmp)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index eb48d5df4a0f7252462bd34b6209f27960d95e93..cfa44a6adc0ad5ec29f78228196b7e834b65df40 100644
@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
        b.lo    1b
        dsb     sy
        ret
-ENDPROC(__flush_dcache_area)
+ENDPIPROC(__flush_dcache_area)
 
 /*
  *     __inval_cache_range(start, end)
@@ -131,7 +131,7 @@ __dma_inv_range:
        b.lo    2b
        dsb     sy
        ret
-ENDPROC(__inval_cache_range)
+ENDPIPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
        b.lo    1b
        dsb     sy
        ret
-ENDPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_range)
 
 /*
  *     __dma_map_area(start, size, dir)
@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_range
        b       __dma_clean_range
-ENDPROC(__dma_map_area)
+ENDPIPROC(__dma_map_area)
 
 /*
  *     __dma_unmap_area(start, size, dir)
@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_range
        ret
-ENDPROC(__dma_unmap_area)
+ENDPIPROC(__dma_unmap_area)
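
As a usage sketch (not part of this commit, and the symbol names are
illustrative only), early boot code running with the MMU off could call
the alias instead of the plain symbol:

	/*
	 * Hypothetical caller: zero a region before the kernel virtual
	 * mapping is up. __pi_memset exists only because memset.S now
	 * ends in ENDPIPROC(memset).
	 */
	adrp	x0, __bss_start		// x0 = start address, PC-relative
	add	x0, x0, :lo12:__bss_start
	mov	x1, xzr			// w1 = fill byte (zero)
	adrp	x2, __bss_stop
	add	x2, x2, :lo12:__bss_stop
	sub	x2, x2, x0		// x2 = length in bytes
	bl	__pi_memset		// guaranteed position independent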