[Ventus] ABI and stack adjustment.

Remove all SGPRs (except ra) from the callee-saved register set, as they are mainly used in kernel functions.
Unify the stack to use TP only; we will emit customized instructions for SP use, which should not be
considered a stack according to the LLVM codegen infrastructure (only one stack is allowed).
By unifying the stack to be TP-based, it is much easier for the backend codegen.
This commit is contained in:
Aries 2023-06-21 13:08:02 +08:00
parent a6e8ff959a
commit e6b7935c89
6 changed files with 73 additions and 285 deletions

View File

@ -316,10 +316,8 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.begin();
bool IsEntryFunction = RMFI->isEntryFunction();
Register FPReg = getFPReg(STI);
Register SPReg = getSPReg(STI);
Register TPReg = getTPReg(STI);
Register BPReg = RISCVABI::getBPReg();
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
@ -342,7 +340,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
determineFrameLayout(MF);
// Determine stack ID for each frame index
deterMineStackID(MF);
determineStackID(MF);
// If libcalls are used to spill and restore callee-saved registers, the frame
// has two sections; the opaque section managed by the libcalls, and the
@ -373,9 +371,9 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// FIXME: TP stack size calculation is also not
uint64_t SPStackSize = getStackSize(MF, RISCVStackID::SGPRSpill);
uint64_t TPStackSize = getStackSize(MF, RISCVStackID::VGPRSpill);
uint64_t RealStackSize = IsEntryFunction ?
SPStackSize + RMFI->getLibCallStackSize() :
TPStackSize + RMFI->getLibCallStackSize();
//uint64_t RealStackSize = IsEntryFunction ?
// SPStackSize + RMFI->getLibCallStackSize() :
// TPStackSize + RMFI->getLibCallStackSize();
// Early exit if there is no need to allocate on the stack
if (MFI.getStackSize() == 0 && !MFI.adjustsStack())
@ -391,29 +389,32 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
MF.getFunction(), "Thread pointer required, but has been reserved."});
// Allocate space on the local-mem stack and private-mem stack if necessary.
if(SPStackSize) {
RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
StackOffset::getFixed(SPStackSize),
MachineInstr::FrameSetup, getStackAlign());
uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
// Split the SP adjustment to reduce the offsets of callee saved spill.
if (FirstSPAdjustAmount) {
SPStackSize = FirstSPAdjustAmount;
RealStackSize = FirstSPAdjustAmount;
// Emit ".cfi_def_cfa_offset SPStackSize"
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::cfiDefCfaOffset(nullptr, SPStackSize));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
}
// Allocate space on the local-mem stack and private-mem stack if necessary.
if(SPStackSize)
RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(SPStackSize),
MachineInstr::FrameSetup, getStackAlign());
if(TPStackSize)
if(TPStackSize) {
RI->adjustReg(MBB, MBBI, DL, TPReg, TPReg,
StackOffset::getFixed(TPStackSize),
MachineInstr::FrameSetup, getStackAlign());
// Emit ".cfi_def_cfa_offset RealStackSize"
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::cfiDefCfaOffset(nullptr, RealStackSize));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
// Emit ".cfi_def_cfa_offset TPStackSize"
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::cfiDefCfaOffset(nullptr, TPStackSize));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
}
const auto &CSI = MFI.getCalleeSavedInfo();
@ -444,96 +445,15 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
}
// Generate new FP.
if (hasFP(MF)) {
if (STI.isRegisterReservedByUser(FPReg))
MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
MF.getFunction(), "Frame pointer required, but has been reserved."});
// The frame pointer does need to be reserved from register allocation.
// assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
RI->adjustReg(MBB, MBBI, DL, FPReg, IsEntryFunction ? SPReg : TPReg,
-StackOffset::getFixed(RealStackSize - RMFI->getVarArgsSaveSize()),
MachineInstr::FrameSetup, getStackAlign());
// Emit ".cfi_def_cfa $fp, RVFI->getVarArgsSaveSize()"
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
nullptr, RI->getDwarfRegNum(FPReg, true), RMFI->getVarArgsSaveSize()));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
}
// Emit the second SP adjustment after saving callee saved registers.
if (FirstSPAdjustAmount) {
uint64_t SecondSPAdjustAmount = MFI.getStackSize() - FirstSPAdjustAmount;
assert(SecondSPAdjustAmount > 0 &&
"SecondSPAdjustAmount should be greater than zero");
RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
StackOffset::getFixed(SecondSPAdjustAmount),
MachineInstr::FrameSetup, getStackAlign());
// If we are using a frame-pointer, and thus emitted ".cfi_def_cfa fp, 0",
// don't emit an sp-based .cfi_def_cfa_offset
if (!hasFP(MF)) {
// Emit ".cfi_def_cfa_offset StackSize"
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(
nullptr, MFI.getStackSize()));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
}
}
if (hasFP(MF)) {
// Realign Stack
const RISCVRegisterInfo *RI = STI.getRegisterInfo();
if (RI->hasStackRealignment(MF)) {
Align MaxAlignment = MFI.getMaxAlign();
const RISCVInstrInfo *TII = STI.getInstrInfo();
if (isInt<12>(-(int)MaxAlignment.value())) {
BuildMI(MBB, MBBI, DL, TII->get(RISCV::ANDI), SPReg)
.addReg(SPReg)
.addImm(-(int)MaxAlignment.value())
.setMIFlag(MachineInstr::FrameSetup);
} else {
unsigned ShiftAmount = Log2(MaxAlignment);
Register VR =
MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
BuildMI(MBB, MBBI, DL, TII->get(RISCV::SRLI), VR)
.addReg(SPReg)
.addImm(ShiftAmount)
.setMIFlag(MachineInstr::FrameSetup);
BuildMI(MBB, MBBI, DL, TII->get(RISCV::SLLI), SPReg)
.addReg(VR)
.addImm(ShiftAmount)
.setMIFlag(MachineInstr::FrameSetup);
}
// FP will be used to restore the frame in the epilogue, so we need
// another base register BP to record SP after re-alignment. SP will
// track the current stack after allocating variable sized objects.
if (hasBP(MF)) {
// move BP, SP
BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), BPReg)
.addReg(SPReg)
.addImm(0)
.setMIFlag(MachineInstr::FrameSetup);
}
}
}
}
void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const RISCVRegisterInfo *RI = STI.getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
auto *RMFI = MF.getInfo<RISCVMachineFunctionInfo>();
Register FPReg = getFPReg(STI);
Register SPReg = getSPReg(STI);
Register TPReg = getTPReg(STI);
bool IsEntryFunction = RMFI->isEntryFunction();
// Get the insert location for the epilogue. If there were no terminators in
// the block, get the last instruction.
MachineBasicBlock::iterator MBBI = MBB.end();
@ -561,49 +481,11 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
if (!CSI.empty())
LastFrameDestroy = std::prev(MBBI, CSI.size());
// FIXME: Need to get 2 stack size for TP and SP!
// Get 2 stack size for TP and SP
uint64_t SPStackSize = getStackSize(MF, RISCVStackID::SGPRSpill);
uint64_t TPStackSize = getStackSize(MF, RISCVStackID::VGPRSpill);
uint64_t RealStackSize = IsEntryFunction ? SPStackSize :
TPStackSize + RMFI->getLibCallStackSize();
uint64_t FPOffset = RealStackSize - RMFI->getVarArgsSaveSize();
// Restore the stack pointer using the value of the frame pointer. Only
// necessary if the stack pointer was modified, meaning the stack size is
// unknown.
//
// In order to make sure the stack point is right through the EH region,
// we also need to restore stack pointer from the frame pointer if we
// don't preserve stack space within prologue/epilogue for outgoing variables,
// normally it's just checking the variable sized object is present or not
// is enough, but we also don't preserve that at prologue/epilogue when
// have vector objects in stack.
if (RI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
!hasReservedCallFrame(MF)) {
assert(hasFP(MF) && "frame pointer should not have been eliminated");
RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, FPReg,
StackOffset::getFixed(-FPOffset),
MachineInstr::FrameDestroy, getStackAlign());
}
uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
if (FirstSPAdjustAmount) {
uint64_t SecondSPAdjustAmount = MFI.getStackSize() - FirstSPAdjustAmount;
assert(SecondSPAdjustAmount > 0 &&
"SecondSPAdjustAmount should be greater than zero");
RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg,
StackOffset::getFixed(-SecondSPAdjustAmount),
MachineInstr::FrameDestroy, getStackAlign());
}
if (FirstSPAdjustAmount)
SPStackSize = FirstSPAdjustAmount;
// Deallocate stack
// FIXME: Allocate space for two stacks, this is depend on the actual use of
// these two stacks, not based on calling convention
if(SPStackSize)
RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
StackOffset::getFixed(-SPStackSize),
@ -621,8 +503,6 @@ StackOffset
RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
Register &FrameReg) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
const auto *RMFI = MF.getInfo<RISCVMachineFunctionInfo>();
// Callee-saved registers should be referenced relative to the stack
// pointer (positive offset), otherwise use the frame pointer (negative
@ -640,107 +520,24 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
StackOffset::getFixed(MFI.getObjectOffset(FI) - getOffsetOfLocalArea() +
MFI.getOffsetAdjustment());
uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
if (CSI.size()) {
MinCSFI = CSI[0].getFrameIdx();
MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
}
// This goes to the offset calculation of callee saved register, ra/s0
if (FI >= MinCSFI && FI <= MaxCSFI) {
// sp represents SGPR spill, tp represents VGPR spill
// FIXME: we need to define TargetStackID::VGPRSpill?
FrameReg = StackID == TargetStackID::SGPRSpill ? RISCV::X2 : RISCV::X4;
// if (FirstSPAdjustAmount)
// Offset -= StackOffset::getFixed(FirstSPAdjustAmount);
// else
Offset -= StackOffset::getFixed(getStackSize(const_cast<MachineFunction&>(MF)
, RISCVStackID::SGPRSpill));
// Different stacks for sALU and vALU threads.
FrameReg = StackID == RISCVStackID::SGPRSpill ? RISCV::X2 : RISCV::X4;
// TODO: This only saves sGPR CSRs, as we haven't define vGPR CSRs
// within getNonLibcallCSI.
//if (FI >= MinCSFI && FI <= MaxCSFI) {
Offset -= StackOffset::getFixed(
getStackSize(const_cast<MachineFunction&>(MF),
(RISCVStackID::Value)StackID));
return Offset;
}
//}
// assert(StackID == TargetStackID::Default &&
// "SGPRSpill stack should not reach here!");
if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(FI)) {
// TODO: add stack alignment
// assert(0 && "TODO: Add stack realignment support for Ventus?");
// If the per-thread stack was realigned, the frame pointer is set in order
// to allow TP to be restored, so we need another base register to record
// the stack after realignment.
// |--------------------------| -- <-- FP
// | callee-allocated save | | <----|
// | area for register varargs| | |
// |--------------------------| | |
// | callee-saved registers | | |
// |--------------------------| -- |
// | realignment (the size of | | |
// | this area is not counted | | |
// | in MFI.getStackSize()) | | |
// |--------------------------| -- |-- MFI.getStackSize()
// | scalar local variables | | <----'
// |--------------------------| -- <-- BP (if var sized objects present)
// | VarSize objects | |
// |--------------------------| -- <-- TP
if (hasBP(MF)) {
FrameReg = RISCVABI::getBPReg();
} else {
// VarSize objects must be empty in this case!
assert(!MFI.hasVarSizedObjects());
FrameReg = RISCV::X4;
}
} else {
FrameReg = RI->getFrameRegister(MF);
}
if (FrameReg == getFPReg(STI)) {
// assert(0 && "TODO: Add fp support for Ventus?");
Offset -= StackOffset::getFixed(RMFI->getVarArgsSaveSize());
if (FI >= 0)
Offset -= StackOffset::getFixed(RMFI->getLibCallStackSize());
// When using FP to access scalable vector objects, we need to minus
// the frame size.
//
// |--------------------------| -- <-- FP
// | callee-allocated save | |
// | area for register varargs| |
// |--------------------------| |
// | callee-saved registers | |
// |--------------------------| | MFI.getStackSize()
// | scalar local variables | |
// |--------------------------| --
// | VarSize objects |
// |--------------------------| <-- SP
return Offset;
}
// This case handles indexing off both SP and BP.
// If indexing off SP, there must not be any var sized objects
assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());
// |--------------------------| -- <-- FP
// | callee-allocated save | | <----|
// | area for register varargs| | |
// |--------------------------| | |
// | callee-saved registers | | |
// |--------------------------| -- |
// | scalar local variables | | <----'
// |--------------------------| -- <-- BP (if var sized objects present)
// | VarSize objects | |
// |--------------------------| -- <-- SP
//
if (MFI.isFixedObjectIndex(FI)) {
// assert(0 && "TODO!");
assert(!RI->hasStackRealignment(MF) &&
"Can't index across variable sized realign");
Offset -= StackOffset::get(MFI.getStackSize() +
RMFI->getLibCallStackSize(),0);
} else {
Offset -= StackOffset::getFixed(MFI.getStackSize());
}
return Offset;
//return Offset;
}
void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
@ -877,7 +674,7 @@ RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
}
uint64_t RISCVFrameLowering::getStackSize(MachineFunction &MF,
RISCVStackID::Value ID) const {
RISCVStackID::Value ID) const {
MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t StackSize = 0;
@ -887,23 +684,25 @@ uint64_t RISCVFrameLowering::getStackSize(MachineFunction &MF,
uint64_t Align = MFI.getObjectAlign(I).value();
uint64_t ActualAlignSize = (Align + 3) >> 2;
uint64_t Size = ActualAlignSize * MFI.getObjectSize(I);
StackSize += Size;
StackSize += Size;
}
}
return StackSize;
}
void RISCVFrameLowering::deterMineStackID(MachineFunction &MF) const {
void RISCVFrameLowering::determineStackID(MachineFunction &MF) const {
llvm::MachineFrameInfo &MFI = MF.getFrameInfo();
for(int I = MFI.getObjectIndexBegin(); I != MFI.getObjectIndexEnd(); I++) {
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF,I);
if((MFI.getStackID(I) != RISCVStackID::SGPRSpill) &&
PtrInfo.getAddrSpace() == RISCVAS::PRIVATE_ADDRESS)
MFI.setStackID(I, RISCVStackID::VGPRSpill);
else
// FIXME: other stack?
MFI.setStackID(I, RISCVStackID::SGPRSpill);
// FIXME: There is no sGPR spill stack!
MFI.setStackID(I, RISCVStackID::VGPRSpill);
// MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF,I);
// if(MFI.getStackID(I) != RISCVStackID::SGPRSpill &&
// PtrInfo.getAddrSpace() == RISCVAS::PRIVATE_ADDRESS)
// MFI.setStackID(I, RISCVStackID::VGPRSpill);
// else
// MFI.setStackID(I, RISCVStackID::SGPRSpill);
}
}
@ -938,12 +737,14 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
if(Reg.id() < RISCV::V0 || Reg.id() > RISCV::V255 ) {
// TODO: Have we allocated stack for vGPR spilling?
if(Reg.id() < RISCV::V0 || Reg.id() > RISCV::V255) {
MF->getFrameInfo().setStackID(CS.getFrameIdx(), RISCVStackID::SGPRSpill);
TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
RC, TRI);
} else {
MF->getFrameInfo().setStackID(CS.getFrameIdx(), RISCVStackID::VGPRSpill);
}
TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
RC, TRI);
}
return true;

View File

@ -68,7 +68,7 @@ public:
uint64_t getStackSize(MachineFunction &MF, RISCVStackID::Value ID) const;
/// Before insert prolog/epilog information, set stack ID for each frame index
void deterMineStackID(MachineFunction &MF) const;
void determineStackID(MachineFunction &MF) const;
bool enableShrinkWrapping(const MachineFunction &MF) const override;

View File

@ -261,7 +261,7 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
Register FrameReg;
StackOffset Offset =
StackOffset Offset = // FIXME: The FrameReg and Offset should be depended on divergency route.
getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
@ -305,13 +305,8 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = getFrameLowering(MF);
const RISCVMachineFunctionInfo *FuncInfo =
MF.getInfo<RISCVMachineFunctionInfo>();
if(FuncInfo->isEntryFunction())
return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
// Non-kernel function, we also use X8 for frame pointer
return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X4;
return MF.getInfo<RISCVMachineFunctionInfo>()->isEntryFunction()
? RISCV::X2 : RISCV::X4;
}
const uint32_t *

View File

@ -355,20 +355,18 @@ unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
return BaseT::getRegUsageForType(Ty);
}
// FIXME: Copy from AMDGPU
bool RISCVTTIImpl::isSourceOfDivergence(const Value *V) const {
// if (const Argument *A = dyn_cast<Argument>(V))
// return !AMDGPU::isArgPassedInSGPR(A);
// Loads from the private and flat address spaces are divergent, because
// Loads from the private memory are divergent, because
// threads can execute the load instruction with the same inputs and get
// different results.
//
// All other loads are not divergent, because if threads issue loads with the
// same arguments, they will always get the same result.
if (const LoadInst *Load = dyn_cast<LoadInst>(V))
return Load->getPointerAddressSpace() == RISCVAS::PRIVATE_ADDRESS ||
Load->getPointerAddressSpace() == RISCVAS::LOCAL_ADDRESS;
return Load->getPointerAddressSpace() == RISCVAS::PRIVATE_ADDRESS;
// Atomics are divergent because they are executed sequentially: when an
// atomic operation refers to the same address in each thread, then each
@ -380,25 +378,20 @@ bool RISCVTTIImpl::isSourceOfDivergence(const Value *V) const {
// if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
// if (Intrinsic->getIntrinsicID() == Intrinsic::read_register)
// return isReadRegisterSourceOfDivergence(Intrinsic);
// return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
// }
// Assume all function calls are a source of divergence.
// if (const CallInst *CI = dyn_cast<CallInst>(V)) {
// if (CI->isInlineAsm() && isa<IntrinsicInst>(CI))
// return RISCVII::isIntrinsicSourceOfDivergence(
// cast<IntrinsicInst>(CI)->getIntrinsicID());
// return true;
// }
if (const CallInst *CI = dyn_cast<CallInst>(V)) {
if (CI->isInlineAsm() && isa<IntrinsicInst>(CI))
return RISCVII::isIntrinsicSourceOfDivergence(
cast<IntrinsicInst>(CI)->getIntrinsicID());
return true;
}
// Assume all function calls are a source of divergence.
if (isa<InvokeInst>(V))
return true;
// Assume all function calls are a source of divergence.
if (isa<CallInst>(V))
return true;
return false;
}

View File

@ -13,9 +13,12 @@
// The Ventus GPGPU calling convention is handled with custom code in
// RISCVISelLowering.cpp (CC_Ventus).
// sGPRs (except ra) can only be set by kernel function, so there is no callee
// saved sGPRs is needed.
def CSR_ILP32_LP64
: CalleeSavedRegs<(add X1, X3, X8, X9, (sequence "X%u", 18, 27),
(sequence "V%u", 32, 47))>;
: CalleeSavedRegs<(add X1, (sequence "V%u", 32, 255))>;
// Following are not used by Ventus GPGPU.
def CSR_ILP32F_LP64F
: CalleeSavedRegs<(add CSR_ILP32_LP64,

View File

@ -247,13 +247,9 @@ def XLenRI : RegInfoByHwMode<
[RegInfo<32,32,32>, RegInfo<64,64,64>]>;
// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
// All sGPRs are callee saved registers.
def GPR : RVRegisterClass<"RISCV", [XLenVT], 32, (add
(sequence "X%u", 10, 17),
(sequence "X%u", 5, 7),
(sequence "X%u", 28, 31),
(sequence "X%u", 8, 9),
(sequence "X%u", 18, 27),
(sequence "X%u", 5, 63),
(sequence "X%u", 0, 4)
)> {
let RegInfos = XLenRI;