diff --git a/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-f64-mul.ll b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-f64-mul.ll
index 5334e2921461..dab66d0e37f9 100644
--- a/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-f64-mul.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-f64-mul.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s --mattr=+mve.fp,+fp64 -o - | FileCheck %s
+; RUN: llc < %s --mattr=+mve.fp,+fp64 -o - -verify-machineinstrs | FileCheck %s
 
 target triple = "thumbv8.1m.main-none-none-eabi"
 
@@ -60,68 +60,3 @@ entry:
   %interleaved.vec = shufflevector <2 x double> %5, <2 x double> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
   ret <4 x double> %interleaved.vec
 }
-
-; Expected to not transform
-define arm_aapcs_vfpcc <8 x double> @complex_mul_v8f64(<8 x double> %a, <8 x double> %b) {
-; CHECK-LABEL: complex_mul_v8f64:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #64
-; CHECK-NEXT:    sub sp, #64
-; CHECK-NEXT:    add r0, sp, #128
-; CHECK-NEXT:    vmov q7, q1
-; CHECK-NEXT:    vldrw.u32 q4, [r0]
-; CHECK-NEXT:    add r0, sp, #160
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vmov q6, q0
-; CHECK-NEXT:    vmov q0, q2
-; CHECK-NEXT:    add r0, sp, #176
-; CHECK-NEXT:    vmov q5, q3
-; CHECK-NEXT:    vstrw.32 q2, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmul.f64 d5, d3, d0
-; CHECK-NEXT:    vstrw.32 q1, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vstrw.32 q5, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmul.f64 d4, d1, d3
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vmov q0, q5
-; CHECK-NEXT:    add r0, sp, #144
-; CHECK-NEXT:    vstrw.32 q1, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vmul.f64 d11, d3, d0
-; CHECK-NEXT:    vmul.f64 d10, d1, d3
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vmul.f64 d7, d9, d12
-; CHECK-NEXT:    vmul.f64 d2, d15, d1
-; CHECK-NEXT:    vmul.f64 d3, d1, d14
-; CHECK-NEXT:    vmul.f64 d6, d13, d9
-; CHECK-NEXT:    vfma.f64 d7, d8, d13
-; CHECK-NEXT:    vfnms.f64 d6, d8, d12
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vfma.f64 d3, d0, d15
-; CHECK-NEXT:    vfnms.f64 d2, d0, d14
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vfma.f64 d5, d0, d9
-; CHECK-NEXT:    vfnms.f64 d4, d0, d8
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vfma.f64 d11, d0, d9
-; CHECK-NEXT:    vfnms.f64 d10, d0, d8
-; CHECK-NEXT:    vmov q0, q3
-; CHECK-NEXT:    vmov q3, q5
-; CHECK-NEXT:    add sp, #64
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    bx lr
-entry:
-  %a.real = shufflevector <8 x double> %a, <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %a.imag = shufflevector <8 x double> %a, <8 x double> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %b.real = shufflevector <8 x double> %b, <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %b.imag = shufflevector <8 x double> %b, <8 x double> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %0 = fmul fast <4 x double> %b.imag, %a.real
-  %1 = fmul fast <4 x double> %b.real, %a.imag
-  %2 = fadd fast <4 x double> %1, %0
-  %3 = fmul fast <4 x double> %b.real, %a.real
-  %4 = fmul fast <4 x double> %a.imag, %b.imag
-  %5 = fsub fast <4 x double> %3, %4
-  %interleaved.vec = shufflevector <4 x double> %5, <4 x double> %2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  ret <8 x double> %interleaved.vec
-}