[VENTUS][fix] Deprecate vmv.s.x and use vmv.v.x instead
As required, the vmv.s.x instruction may later be deprecated, so all uses are switched to vmv.v.x instead.
parent 592d77cb76
commit 50b23dc21a
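The call-site change is mechanical: every BuildMI that previously selected RISCV::VMV_S_X now selects RISCV::VMV_V_X with an unchanged operand list. A minimal sketch of the resulting sGPR-to-vGPR move, modeled on the copyPhysReg hunk below (the helper name and signature are illustrative only, not code from this tree):

    // Illustrative sketch only: mirrors how copyPhysReg emits the move after
    // this change; the helper itself is not part of the patch.
    #include "RISCVInstrInfo.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    using namespace llvm;

    static void emitScalarToVGPRMove(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const DebugLoc &DL,
                                     const RISCVInstrInfo &TII,
                                     MCRegister DstReg, MCRegister SrcReg,
                                     bool KillSrc) {
      // vmv.v.x vd, rs1 replaces the old vmv.s.x form; operands are unchanged.
      BuildMI(MBB, MBBI, DL, TII.get(RISCV::VMV_V_X), DstReg)
          .addReg(DstReg, RegState::Undef)           // tied pass-through
          .addReg(SrcReg, getKillRegState(KillSrc)); // scalar source
    }

Both opcodes take the same operands at these call sites, so nothing beyond the opcode swap is expected to change.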
@@ -86,8 +86,8 @@ bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
 bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        MachineBasicBlock::iterator &NextMBBI) {
-  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
-  // expanded instructions for each pseudo is correct in the Size field of the
+  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
+  // expanded instructions for each pseudo is correct in the Size field of the
   // tablegen definition for the pseudo.
   switch (MBBI->getOpcode()) {
   case RISCV::PseudoAtomicLoadNand32:
@@ -623,7 +623,7 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
         .addReg(NewValReg);
     MCRegister VGPR1 = RRI->findUnusedRegister(
         MF->getRegInfo(), &RISCV::VGPRRegClass, *MF);
-    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_S_X), VGPR1)
+    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_V_X), VGPR1)
         .addReg(VGPR1)
         .addReg(RISCV::X0);
     BuildMI(LoopTailMBB, DL, TII->get(RISCV::VBNE))
@@ -659,7 +659,7 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
         .addReg(ScratchReg);
     MCRegister VGPR2 = RRI->findUnusedRegister(
         MF->getRegInfo(), &RISCV::VGPRRegClass, *MF);
-    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_S_X), VGPR2)
+    BuildMI(LoopTailMBB, DL, TII->get(RISCV::VMV_V_X), VGPR2)
         .addReg(VGPR2)
         .addReg(RISCV::X0);
     BuildMI(LoopTailMBB, DL, TII->get(RISCV::VBNE))

@@ -417,7 +417,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
     BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
         .addCFIIndex(CFIIndex)
         .setMIFlag(MachineInstr::FrameSetup);
-    BuildMI(MBB, MBBI, DL, TII->get(RISCV::VMV_S_X),
+    BuildMI(MBB, MBBI, DL, TII->get(RISCV::VMV_V_X),
             RI->getPrivateMemoryBaseRegister(MF))
         .addReg(RI->getPrivateMemoryBaseRegister(MF))
         .addReg(TPReg);

@@ -11144,7 +11144,7 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
   if(isDivergent) {
     Register Dummy = MRI.createVirtualRegister(&RISCV::VGPRRegClass);
     Register Dummy1 = MRI.createVirtualRegister(&RISCV::VGPRRegClass);
-    BuildMI(MBB, DL, TII.get(RISCV::VMV_S_X), Dummy)
+    BuildMI(MBB, DL, TII.get(RISCV::VMV_V_X), Dummy)
         .addReg(Dummy, RegState::Undef)
         .addReg(RISCV::X0);
     BuildMI(MBB, DL, TII.get(RISCV::VADD_VX), Dummy1)
@@ -11882,10 +11882,10 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
     VaArgOffset = VarArgsSaveSize;
   }

   // Record the frame index of the first variable argument
   // which is a value necessary to VASTART.
   int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
   MFI.setStackID(FI, RISCVStackID::VGPRSpill);
   RVFI->setVarArgsFrameIndex(FI);

   // If saving an odd number of registers then create an extra stack slot to
@@ -11893,6 +11893,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   // offsets to even-numbered registered remain 2*XLEN-aligned.
   if (Idx % 2) {
     MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+    MFI.setStackID(FI, RISCVStackID::VGPRSpill);
     VarArgsSaveSize += XLenInBytes;
   }

@@ -11906,6 +11907,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     RegInfo.addLiveIn(ArgRegs[I], Reg);
     SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
     FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+    MFI.setStackID(FI, RISCVStackID::VGPRSpill);
     // MFI.setStackID(FI, RISCVStackID::VGPRSpill);
     SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
     SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,

@@ -180,7 +180,7 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   // sGPR -> vGPR move
   if (RISCV::GPRRegClass.contains(SrcReg) &&
       RISCV::VGPRRegClass.contains(DstReg)) {
-    BuildMI(MBB, MBBI, DL, get(RISCV::VMV_S_X), DstReg)
+    BuildMI(MBB, MBBI, DL, get(RISCV::VMV_V_X), DstReg)
         .addReg(DstReg, RegState::Undef)
         .addReg(SrcReg, getKillRegState(KillSrc));
     return;

@@ -1172,16 +1172,13 @@ def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                       (ins VGPR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
 let Constraints = "$vd = $vd_rb" in
-def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VGPR:$vd),
-                       (ins VGPR:$vd_rb, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
+def VMV_V_X : RVInstV2<0b010111, 0b00000, OPIVX, (outs VGPR:$vd),
+                       (ins VGPR:$vd_rb, GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
 } // RVVConstraint = NoConstraint

 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
-
-def : InstAlias<"vmv.v.x $vd, $rs1",
-                (VMV_S_X VGPR:$vd, GPR:$rs1)>;

 let Predicates = [HasVInstructionsAnyF] in {

 let hasSideEffects = 0, mayLoad = 0, mayStore = 0,

@@ -4839,7 +4839,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
            Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
            Sched<[WriteVIMovVX, ReadVIMovVX]>,
            RISCVVPseudo;
-  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
+  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_V_X,
       Constraints = "$rd = $rs1" in
   def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                          (ins m.vrclass:$rs1, GPR:$rs2,

@@ -1,3 +1,4 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mcpu=ventus-gpgpu -verify-machineinstrs -asm-verbose < %s \
 ; RUN:   | FileCheck -check-prefix=VENTUS %s
+
@@ -18,7 +18,7 @@ entry:
 define dso_local i32 @slt_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: slt_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmslt.vi v0, v0, 12
+; VENTUS-NEXT:    vmsle.vi v0, v0, 11
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp slt i32 %a, 12
@@ -42,7 +42,7 @@ entry:
 define dso_local i32 @sltu_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: sltu_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmsltu.vi v0, v0, 12
+; VENTUS-NEXT:    vmsleu.vi v0, v0, 11
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp ult i32 %a, 12
@@ -66,7 +66,7 @@ entry:
 define dso_local i32 @sle_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: sle_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmslt.vi v0, v0, 13
+; VENTUS-NEXT:    vmsle.vi v0, v0, 12
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp slt i32 %a, 13
@@ -90,7 +90,7 @@ entry:
 define dso_local i32 @sleu_imm(i32 noundef %a) local_unnamed_addr {
 ; VENTUS-LABEL: sleu_imm:
 ; VENTUS:       # %bb.0: # %entry
-; VENTUS-NEXT:    vmsltu.vi v0, v0, 13
+; VENTUS-NEXT:    vmsleu.vi v0, v0, 12
 ; VENTUS-NEXT:    ret
 entry:
   %cmp = icmp ult i32 %a, 13