arm64: use ENDPIPROC() to annotate position independent assembler routines
author		Ard Biesheuvel <ard.biesheuvel@linaro.org>
		Thu, 8 Oct 2015 19:02:03 +0000 (20:02 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
		Mon, 12 Oct 2015 15:19:45 +0000 (16:19 +0100)
For more control over which functions are called with the MMU off or
with the UEFI 1:1 mapping active, annotate some assembler routines as
position independent. This is done by introducing ENDPIPROC(), which
replaces the ENDPROC() declaration of those routines and additionally
emits a __pi_ prefixed alias of each routine's symbol.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
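
As a minimal usage sketch (the routine name copy_bytes is hypothetical and not
part of this patch), a routine annotated this way is written like any other
ENTRY()/ENDPROC() routine, but closes with ENDPIPROC() so that a __pi_ prefixed
alias of its symbol is emitted as well:

	/* hypothetical example only -- not part of this patch */
	#include <linux/linkage.h>
	#include <asm/assembler.h>

	/* void copy_bytes(void *dst, const void *src, size_t count) */
	ENTRY(copy_bytes)
		cbz	x2, 2f			// nothing to copy
	1:	ldrb	w3, [x1], #1		// load one byte from src
		strb	w3, [x0], #1		// store it to dst
		subs	x2, x2, #1		// decrement count
		b.ne	1b
	2:	ret
	ENDPIPROC(copy_bytes)			// also emits __pi_copy_bytes

The body uses only PC-relative branches and register-relative accesses, which
is what makes it safe to run before the kernel virtual mapping is activated;
the idea is that code running with the MMU off or under the UEFI 1:1 mapping
references the __pi_copy_bytes alias, while ordinary kernel code keeps using
the plain copy_bytes symbol.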
arch/arm64/include/asm/assembler.h
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S
arch/arm64/lib/memset.S
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/mm/cache.S

arch/arm64/include/asm/assembler.h
index b51f2cc..12eff92 100644
@@ -193,4 +193,15 @@ lr .req    x30             // link register
        str     \src, [\tmp, :lo12:\sym]
        .endm
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define ENDPIPROC(x)                   \
+       .globl  __pi_##x;               \
+       .type   __pi_##x, %function;    \
+       .set    __pi_##x, x;            \
+       .size   __pi_##x, . - x;        \
+       ENDPROC(x)
+
 #endif /* __ASM_ASSEMBLER_H */
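
For a routine such as memcpy below, ENDPIPROC(memcpy) therefore expands to
roughly the following (the trailing ENDPROC() part comes from
<linux/linkage.h> and is shown here only approximately):

	.globl	__pi_memcpy
	.type	__pi_memcpy, %function
	.set	__pi_memcpy, memcpy		// __pi_memcpy is an alias for memcpy
	.size	__pi_memcpy, . - memcpy
	// ENDPROC(memcpy), roughly:
	.type	memcpy, %function
	.size	memcpy, . - memcpy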
arch/arm64/lib/memchr.S
index 8636b75..4444c1d 100644
@@ -41,4 +41,4 @@ ENTRY(memchr)
        ret
 2:     mov     x0, #0
        ret
-ENDPROC(memchr)
+ENDPIPROC(memchr)
arch/arm64/lib/memcmp.S
index 6ea0776..ffbdec0 100644
@@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 )
 .Lret0:
        mov     result, #0
        ret
-ENDPROC(memcmp)
+ENDPIPROC(memcmp)
arch/arm64/lib/memcpy.S
index 173a1aa..36a6a62 100644
@@ -71,4 +71,4 @@
 ENTRY(memcpy)
 #include "copy_template.S"
        ret
-ENDPROC(memcpy)
+ENDPIPROC(memcpy)
arch/arm64/lib/memmove.S
index 57b19ea..68e2f20 100644
@@ -194,4 +194,4 @@ ENTRY(memmove)
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
-ENDPROC(memmove)
+ENDPIPROC(memmove)
arch/arm64/lib/memset.S
index 7c72dfd..29f405f 100644
@@ -213,4 +213,4 @@ ENTRY(memset)
        ands    count, count, zva_bits_x
        b.ne    .Ltail_maybe_long
        ret
-ENDPROC(memset)
+ENDPIPROC(memset)
arch/arm64/lib/strcmp.S
index 42f828b..471fe61 100644
@@ -231,4 +231,4 @@ CPU_BE(     orr     syndrome, diff, has_nul )
        lsr     data1, data1, #56
        sub     result, data1, data2, lsr #56
        ret
-ENDPROC(strcmp)
+ENDPIPROC(strcmp)
arch/arm64/lib/strlen.S
index 987b68b..55ccc8e 100644
@@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
        csinv   data1, data1, xzr, le
        csel    data2, data2, data2a, le
        b       .Lrealigned
-ENDPROC(strlen)
+ENDPIPROC(strlen)
arch/arm64/lib/strncmp.S
index 0224cf5..e267044 100644
@@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul )
 .Lret0:
        mov     result, #0
        ret
-ENDPROC(strncmp)
+ENDPIPROC(strncmp)
arch/arm64/mm/cache.S
index eb48d5d..cfa44a6 100644
@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
        b.lo    1b
        dsb     sy
        ret
-ENDPROC(__flush_dcache_area)
+ENDPIPROC(__flush_dcache_area)
 
 /*
  *     __inval_cache_range(start, end)
@@ -131,7 +131,7 @@ __dma_inv_range:
        b.lo    2b
        dsb     sy
        ret
-ENDPROC(__inval_cache_range)
+ENDPIPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
        b.lo    1b
        dsb     sy
        ret
-ENDPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_range)
 
 /*
  *     __dma_map_area(start, size, dir)
@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_range
        b       __dma_clean_range
-ENDPROC(__dma_map_area)
+ENDPIPROC(__dma_map_area)
 
 /*
  *     __dma_unmap_area(start, size, dir)
@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_range
        ret
-ENDPROC(__dma_unmap_area)
+ENDPIPROC(__dma_unmap_area)