From 50b23dc21a754223a20391dedfc7f983d114a461 Mon Sep 17 00:00:00 2001
From: zhoujing
Date: Tue, 1 Aug 2023 13:25:24 +0800
Subject: [PATCH] [VENTUS][fix] Deprecate vmv.s.x and use vmv.v.x instead

As required, the vmv.s.x instruction may later be deprecated.
---
 llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp    | 8 ++++----
 llvm/lib/Target/RISCV/RISCVFrameLowering.cpp              | 2 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp               | 6 ++++--
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp                  | 2 +-
 llvm/lib/Target/RISCV/VentusInstrInfoV.td                 | 7 ++-----
 llvm/lib/Target/RISCV/VentusInstrInfoVPseudos.td          | 2 +-
 llvm/test/CodeGen/RISCV/VentusGPGPU/resource-usage.ll     | 1 +
 .../test/CodeGen/RISCV/VentusGPGPU/select_instructions.ll | 8 ++++----
 8 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index 8bfabdbdc3c6..556d59a8ed8e 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -86,8 +86,8 @@ bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
 bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        MachineBasicBlock::iterator &NextMBBI) {
-  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
-  // expanded instructions for each pseudo is correct in the Size field of the
+  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
+  // expanded instructions for each pseudo is correct in the Size field of the
   // tablegen definition for the pseudo.
   switch (MBBI->getOpcode()) {
   case RISCV::PseudoAtomicLoadNand32:
@@ -623,7 +623,7 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
         .addReg(NewValReg);
     MCRegister VGPR1 = RRI->findUnusedRegister(
         MF->getRegInfo(), &RISCV::VGPRRegClass, *MF);
-    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_S_X), VGPR1)
+    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_V_X), VGPR1)
         .addReg(VGPR1)
         .addReg(RISCV::X0);
     BuildMI(LoopTailMBB, DL, TII->get(RISCV::VBNE))
@@ -659,7 +659,7 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
         .addReg(ScratchReg);
     MCRegister VGPR2 = RRI->findUnusedRegister(
         MF->getRegInfo(), &RISCV::VGPRRegClass, *MF);
-    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_S_X), VGPR2)
+    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_V_X), VGPR2)
         .addReg(VGPR2)
         .addReg(RISCV::X0);
     BuildMI(LoopTailMBB, DL, TII->get(RISCV::VBNE))
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 3c0817fdf6be..1f52b7370ffc 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -417,7 +417,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
     BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
         .addCFIIndex(CFIIndex)
         .setMIFlag(MachineInstr::FrameSetup);
-    BuildMI(MBB, MBBI, DL, TII->get(RISCV::VMV_S_X),
+    BuildMI(MBB, MBBI, DL, TII->get(RISCV::VMV_V_X),
             RI->getPrivateMemoryBaseRegister(MF))
         .addReg(RI->getPrivateMemoryBaseRegister(MF))
         .addReg(TPReg);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index be0210d8d6dd..4855689bcb0b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11144,7 +11144,7 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
   if(isDivergent) {
     Register Dummy = MRI.createVirtualRegister(&RISCV::VGPRRegClass);
     Register Dummy1 = MRI.createVirtualRegister(&RISCV::VGPRRegClass);
-    BuildMI(MBB, DL, TII.get(RISCV::VMV_S_X), Dummy)
+    BuildMI(MBB, DL, TII.get(RISCV::VMV_V_X), Dummy)
         .addReg(Dummy, RegState::Undef)
         .addReg(RISCV::X0);
     BuildMI(MBB, DL, TII.get(RISCV::VADD_VX), Dummy1)
@@ -11882,10 +11882,10 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
     VaArgOffset = VarArgsSaveSize;
   }
-
   // Record the frame index of the first variable argument
   // which is a value necessary to VASTART.
   int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+  MFI.setStackID(FI, RISCVStackID::VGPRSpill);
   RVFI->setVarArgsFrameIndex(FI);
 
   // If saving an odd number of registers then create an extra stack slot to
@@ -11893,6 +11893,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   // offsets to even-numbered registered remain 2*XLEN-aligned.
   if (Idx % 2) {
     MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+    MFI.setStackID(FI, RISCVStackID::VGPRSpill);
     VarArgsSaveSize += XLenInBytes;
   }
 
@@ -11906,6 +11907,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     RegInfo.addLiveIn(ArgRegs[I], Reg);
     SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
     FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+    MFI.setStackID(FI, RISCVStackID::VGPRSpill);
     // MFI.setStackID(FI, RISCVStackID::VGPRSpill);
     SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
     SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index d9eb6b89e29a..94b99d9d4f87 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -180,7 +180,7 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   // sGPR -> vGPR move
   if (RISCV::GPRRegClass.contains(SrcReg) &&
       RISCV::VGPRRegClass.contains(DstReg)) {
-    BuildMI(MBB, MBBI, DL, get(RISCV::VMV_S_X), DstReg)
+    BuildMI(MBB, MBBI, DL, get(RISCV::VMV_V_X), DstReg)
         .addReg(DstReg, RegState::Undef)
         .addReg(SrcReg, getKillRegState(KillSrc));
     return;
diff --git a/llvm/lib/Target/RISCV/VentusInstrInfoV.td b/llvm/lib/Target/RISCV/VentusInstrInfoV.td
index 20ded66eb596..db7dfa7ac2a5 100644
--- a/llvm/lib/Target/RISCV/VentusInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/VentusInstrInfoV.td
@@ -1172,16 +1172,13 @@
 def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                       (ins VGPR:$vs2), "vmv.x.s", "$vd, $vs2">,
               Sched<[WriteVIMovVX, ReadVIMovVX]>;
 let Constraints = "$vd = $vd_rb" in
-def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VGPR:$vd),
-                       (ins VGPR:$vd_rb, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
+def VMV_V_X : RVInstV2<0b010111, 0b00000, OPIVX, (outs VGPR:$vd),
+                       (ins VGPR:$vd_rb, GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
               Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
 } // RVVConstraint = NoConstraint
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
-def : InstAlias<"vmv.v.x $vd, $rs1",
-                (VMV_S_X VGPR:$vd, GPR:$rs1)>;
-
 let Predicates = [HasVInstructionsAnyF] in {
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
diff --git a/llvm/lib/Target/RISCV/VentusInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/VentusInstrInfoVPseudos.td
index 3660b3b27747..e15316e448d7 100644
--- a/llvm/lib/Target/RISCV/VentusInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/VentusInstrInfoVPseudos.td
@@ -4839,7 +4839,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
         Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
         Sched<[WriteVIMovVX,
               ReadVIMovVX]>, RISCVVPseudo;
-  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
+  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_V_X,
       Constraints = "$rd = $rs1" in
   def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                          (ins m.vrclass:$rs1, GPR:$rs2,
diff --git a/llvm/test/CodeGen/RISCV/VentusGPGPU/resource-usage.ll b/llvm/test/CodeGen/RISCV/VentusGPGPU/resource-usage.ll
index 6f7d7177499e..6cc3e1a9a89d 100644
--- a/llvm/test/CodeGen/RISCV/VentusGPGPU/resource-usage.ll
+++ b/llvm/test/CodeGen/RISCV/VentusGPGPU/resource-usage.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mcpu=ventus-gpgpu -verify-machineinstrs -asm-verbose < %s \
 ; RUN:   | FileCheck -check-prefix=VENTUS %s
 
diff --git a/llvm/test/CodeGen/RISCV/VentusGPGPU/select_instructions.ll b/llvm/test/CodeGen/RISCV/VentusGPGPU/select_instructions.ll
index 49c1ef084eb3..16e8c004be52 100644
--- a/llvm/test/CodeGen/RISCV/VentusGPGPU/select_instructions.ll
+++ b/llvm/test/CodeGen/RISCV/VentusGPGPU/select_instructions.ll
@@ -18,7 +18,7 @@ entry:
 define dso_local i32 @slt_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: slt_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmslt.vi v0, v0, 12
+; VENTUS-NEXT:    vmsle.vi v0, v0, 11
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp slt i32 %a, 12
@@ -42,7 +42,7 @@ entry:
 define dso_local i32 @sltu_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: sltu_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmsltu.vi v0, v0, 12
+; VENTUS-NEXT:    vmsleu.vi v0, v0, 11
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp ult i32 %a, 12
@@ -66,7 +66,7 @@ entry:
 define dso_local i32 @sle_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: sle_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmslt.vi v0, v0, 13
+; VENTUS-NEXT:    vmsle.vi v0, v0, 12
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp slt i32 %a, 13
@@ -90,7 +90,7 @@ entry:
 define dso_local i32 @sleu_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: sleu_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmsltu.vi v0, v0, 13
+; VENTUS-NEXT:    vmsleu.vi v0, v0, 12
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp ult i32 %a, 13