selftests/powerpc: Replace stxvx and lxvx with stxvd2x/lxvd2x
authorCyril Bur <cyrilbur@gmail.com>
Tue, 7 Mar 2017 00:39:31 +0000 (11:39 +1100)
committerMichael Ellerman <mpe@ellerman.id.au>
Thu, 9 Mar 2017 02:58:00 +0000 (13:58 +1100)
On POWER8 (ISA 2.07), lxvx and stxvx are defined as extended mnemonics
of lxvd2x and stxvd2x. For POWER9 (ISA 3.0), the HW architects, in their
infinite wisdom, made lxvx and stxvx instructions in their own right.
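
To make the distinction concrete, a rough sketch (the register operands
here are only illustrative):

        lxvd2x  vs20,r5,r3      # ISA 2.06 VSX instruction, available on POWER8
        lxvx    vs20,r5,r3      # POWER8 toolchain: extended mnemonic, same
                                # encoding as lxvd2x above
                                # ISA 3.0 toolchain: a distinct instruction
                                # with its own encoding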

A POWER9-aware GCC will use the POWER9 instructions for lxvx and stxvx,
causing these selftests to fail on POWER8. Further compounding the
issue, because of the way -mvsx works, it will cause the POWER9
instructions to be used regardless of passing -mcpu=power8 to GCC or
-mpower8 to AS.

The safest way to address the problem for now is to not use the extended
mnemonics. We don't care how the CPU loads the values from memory since
the tests only perform register comparisons, so using stxvd2x/lxvd2x
does not impact the test.
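
To illustrate why the swap is safe, a minimal sketch (the second buffer
register r4 is only illustrative; the real helpers below take a single
buffer pointer in r3):

        li      r5,0
        lxvd2x  vs20,r5,r3      # load 16 bytes from the input buffer into vs20
        stxvd2x vs20,r5,r4      # store vs20 back out; a matched load/store
                                # pair round-trips the bytes unchanged, so the
                                # register comparison in the tests is unaffected
                                # by which form is used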

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
tools/testing/selftests/powerpc/include/vsx_asm.h

index d828bfb6ef2d9a55f5752352458bca0ab1958549..54064ced9e95b3c66e57748f82ddf65aca91e94b 100644 (file)
  */
 FUNC_START(load_vsx)
        li      r5,0
-       lxvx    vs20,r5,r3
+       lxvd2x  vs20,r5,r3
        addi    r5,r5,16
-       lxvx    vs21,r5,r3
+       lxvd2x  vs21,r5,r3
        addi    r5,r5,16
-       lxvx    vs22,r5,r3
+       lxvd2x  vs22,r5,r3
        addi    r5,r5,16
-       lxvx    vs23,r5,r3
+       lxvd2x  vs23,r5,r3
        addi    r5,r5,16
-       lxvx    vs24,r5,r3
+       lxvd2x  vs24,r5,r3
        addi    r5,r5,16
-       lxvx    vs25,r5,r3
+       lxvd2x  vs25,r5,r3
        addi    r5,r5,16
-       lxvx    vs26,r5,r3
+       lxvd2x  vs26,r5,r3
        addi    r5,r5,16
-       lxvx    vs27,r5,r3
+       lxvd2x  vs27,r5,r3
        addi    r5,r5,16
-       lxvx    vs28,r5,r3
+       lxvd2x  vs28,r5,r3
        addi    r5,r5,16
-       lxvx    vs29,r5,r3
+       lxvd2x  vs29,r5,r3
        addi    r5,r5,16
-       lxvx    vs30,r5,r3
+       lxvd2x  vs30,r5,r3
        addi    r5,r5,16
-       lxvx    vs31,r5,r3
+       lxvd2x  vs31,r5,r3
        blr
 FUNC_END(load_vsx)
 
 FUNC_START(store_vsx)
        li      r5,0
-       stxvx   vs20,r5,r3
+       stxvd2x vs20,r5,r3
        addi    r5,r5,16
-       stxvx   vs21,r5,r3
+       stxvd2x vs21,r5,r3
        addi    r5,r5,16
-       stxvx   vs22,r5,r3
+       stxvd2x vs22,r5,r3
        addi    r5,r5,16
-       stxvx   vs23,r5,r3
+       stxvd2x vs23,r5,r3
        addi    r5,r5,16
-       stxvx   vs24,r5,r3
+       stxvd2x vs24,r5,r3
        addi    r5,r5,16
-       stxvx   vs25,r5,r3
+       stxvd2x vs25,r5,r3
        addi    r5,r5,16
-       stxvx   vs26,r5,r3
+       stxvd2x vs26,r5,r3
        addi    r5,r5,16
-       stxvx   vs27,r5,r3
+       stxvd2x vs27,r5,r3
        addi    r5,r5,16
-       stxvx   vs28,r5,r3
+       stxvd2x vs28,r5,r3
        addi    r5,r5,16
-       stxvx   vs29,r5,r3
+       stxvd2x vs29,r5,r3
        addi    r5,r5,16
-       stxvx   vs30,r5,r3
+       stxvd2x vs30,r5,r3
        addi    r5,r5,16
-       stxvx   vs31,r5,r3
+       stxvd2x vs31,r5,r3
        blr
 FUNC_END(store_vsx)