author    Cyril Bur <cyrilbur@gmail.com>    2017-03-07 11:39:31 +1100
committer Michael Ellerman <mpe@ellerman.id.au>    2017-03-09 13:58:00 +1100
commit    605df8d674ac65e044a0bf4998b28c2f350b7f9e (patch)
tree      270e3a1e5f3b6bc8a843925cca86f53f21abe6b0 /tools
parent    78b4416aa249365dd3c1b64da4d3a232014320b0 (diff)
selftests/powerpc: Replace stxvx and lxvx with stxvd2x/lxvd2x
On POWER8 (ISA 2.07) lxvx and stxvx are defined to be extended mnemonics of lxvd2x and stxvd2x. For POWER9 (ISA 3.0) the HW architects in their infinite wisdom made lxvx and stxvx instructions in their own right.

POWER9 aware GCC will use the POWER9 instructions for lxvx and stxvx, causing these selftests to fail on POWER8. Further compounding the issue, because of the way -mvsx works it will cause the POWER9 instructions to be used regardless of -mcpu=power8 to GCC or -mpower8 to AS.

The safest way to address the problem for now is to not use the extended mnemonics. We don't care how the CPU loads the values from memory since the tests only perform register comparisons, so using stxvd2x/lxvd2x does not impact the tests.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
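The claim above, that only register contents matter and not how lxvd2x orders the doublewords in memory, can be illustrated with a small round-trip check from the C side. This is a minimal sketch, not code from the selftests: the load_vsx/store_vsx prototypes are assumed here (a pointer to 12 x 16 bytes of backing store, matching the vs20..vs31 loop in the assembly below); the real callers in the selftests declare their own.

#include <string.h>

/* Assumed prototypes for illustration only. */
extern void load_vsx(void *buf);
extern void store_vsx(void *buf);

#define NUM_VSX_REGS	12	/* vs20..vs31, as in vsx_asm.h */
#define VSX_REG_BYTES	16	/* each VSX register is 128 bits */

/* Returns 0 if the register round trip preserved every byte. */
int vsx_roundtrip_ok(void)
{
	unsigned char in[NUM_VSX_REGS * VSX_REG_BYTES];
	unsigned char out[NUM_VSX_REGS * VSX_REG_BYTES];
	unsigned int i;

	for (i = 0; i < sizeof(in); i++)
		in[i] = (unsigned char)i;

	load_vsx(in);	/* lxvd2x fills vs20..vs31 from the buffer */
	store_vsx(out);	/* stxvd2x writes vs20..vs31 back out */

	/*
	 * Whatever doubleword ordering lxvd2x/stxvd2x use, a load followed
	 * by a store of the same register is an identity, so the buffers
	 * must compare equal; the in-register layout never matters here.
	 */
	return memcmp(in, out, sizeof(in));
}

Either mnemonic would pass such a check on the CPU it was assembled for; the point of the patch is that lxvd2x/stxvd2x have had the same encoding since ISA 2.06, so the resulting binary behaves the same on POWER8 and POWER9.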
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/powerpc/include/vsx_asm.h  48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
index d828bfb6ef2d..54064ced9e95 100644
--- a/tools/testing/selftests/powerpc/include/vsx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
@@ -16,56 +16,56 @@
*/
FUNC_START(load_vsx)
li r5,0
- lxvx vs20,r5,r3
+ lxvd2x vs20,r5,r3
addi r5,r5,16
- lxvx vs21,r5,r3
+ lxvd2x vs21,r5,r3
addi r5,r5,16
- lxvx vs22,r5,r3
+ lxvd2x vs22,r5,r3
addi r5,r5,16
- lxvx vs23,r5,r3
+ lxvd2x vs23,r5,r3
addi r5,r5,16
- lxvx vs24,r5,r3
+ lxvd2x vs24,r5,r3
addi r5,r5,16
- lxvx vs25,r5,r3
+ lxvd2x vs25,r5,r3
addi r5,r5,16
- lxvx vs26,r5,r3
+ lxvd2x vs26,r5,r3
addi r5,r5,16
- lxvx vs27,r5,r3
+ lxvd2x vs27,r5,r3
addi r5,r5,16
- lxvx vs28,r5,r3
+ lxvd2x vs28,r5,r3
addi r5,r5,16
- lxvx vs29,r5,r3
+ lxvd2x vs29,r5,r3
addi r5,r5,16
- lxvx vs30,r5,r3
+ lxvd2x vs30,r5,r3
addi r5,r5,16
- lxvx vs31,r5,r3
+ lxvd2x vs31,r5,r3
blr
FUNC_END(load_vsx)
FUNC_START(store_vsx)
li r5,0
- stxvx vs20,r5,r3
+ stxvd2x vs20,r5,r3
addi r5,r5,16
- stxvx vs21,r5,r3
+ stxvd2x vs21,r5,r3
addi r5,r5,16
- stxvx vs22,r5,r3
+ stxvd2x vs22,r5,r3
addi r5,r5,16
- stxvx vs23,r5,r3
+ stxvd2x vs23,r5,r3
addi r5,r5,16
- stxvx vs24,r5,r3
+ stxvd2x vs24,r5,r3
addi r5,r5,16
- stxvx vs25,r5,r3
+ stxvd2x vs25,r5,r3
addi r5,r5,16
- stxvx vs26,r5,r3
+ stxvd2x vs26,r5,r3
addi r5,r5,16
- stxvx vs27,r5,r3
+ stxvd2x vs27,r5,r3
addi r5,r5,16
- stxvx vs28,r5,r3
+ stxvd2x vs28,r5,r3
addi r5,r5,16
- stxvx vs29,r5,r3
+ stxvd2x vs29,r5,r3
addi r5,r5,16
- stxvx vs30,r5,r3
+ stxvd2x vs30,r5,r3
addi r5,r5,16
- stxvx vs31,r5,r3
+ stxvd2x vs31,r5,r3
blr
FUNC_END(store_vsx)