ARM: Remove implicit iterator conversions, NFC
Remove remaining implicit conversions from MachineInstrBundleIterator to MachineInstr* from the ARM backend. In most cases, I made them less attractive by preferring MachineInstr& or using a ranged-based for loop. Once all the backends are fixed I'll make the operator explicit so that this doesn't bitrot back. llvm-svn: 274920
This commit is contained in:
parent
acf564ca15
commit
29c524983b
|
@ -693,7 +693,7 @@ bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
|
||||||
|
|
||||||
for (MachineBasicBlock::iterator MI = MFI->begin(), ME = MFI->end();
|
for (MachineBasicBlock::iterator MI = MFI->begin(), ME = MFI->end();
|
||||||
MI != ME;) {
|
MI != ME;) {
|
||||||
Modified |= runOnInstruction(MI++);
|
Modified |= runOnInstruction(&*MI++);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -360,9 +360,9 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
|
||||||
if (AllowModify) {
|
if (AllowModify) {
|
||||||
MachineBasicBlock::iterator DI = std::next(I);
|
MachineBasicBlock::iterator DI = std::next(I);
|
||||||
while (DI != MBB.end()) {
|
while (DI != MBB.end()) {
|
||||||
MachineInstr *InstToDelete = DI;
|
MachineInstr &InstToDelete = *DI;
|
||||||
++DI;
|
++DI;
|
||||||
InstToDelete->eraseFromParent();
|
InstToDelete.eraseFromParent();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1227,12 +1227,11 @@ unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
|
||||||
|
|
||||||
/// \brief Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
|
/// \brief Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
|
||||||
/// depending on whether the result is used.
|
/// depending on whether the result is used.
|
||||||
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MBBI) const {
|
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
|
||||||
bool isThumb1 = Subtarget.isThumb1Only();
|
bool isThumb1 = Subtarget.isThumb1Only();
|
||||||
bool isThumb2 = Subtarget.isThumb2();
|
bool isThumb2 = Subtarget.isThumb2();
|
||||||
const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
|
const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
|
||||||
|
|
||||||
MachineInstr *MI = MBBI;
|
|
||||||
DebugLoc dl = MI->getDebugLoc();
|
DebugLoc dl = MI->getDebugLoc();
|
||||||
MachineBasicBlock *BB = MI->getParent();
|
MachineBasicBlock *BB = MI->getParent();
|
||||||
|
|
||||||
|
@ -1275,7 +1274,7 @@ void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MBBI) const {
|
||||||
STM.addReg(Reg, RegState::Kill);
|
STM.addReg(Reg, RegState::Kill);
|
||||||
}
|
}
|
||||||
|
|
||||||
BB->erase(MBBI);
|
BB->erase(MI);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -722,14 +722,10 @@ unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
|
||||||
/// information about the sizes of each block and the locations of all
|
/// information about the sizes of each block and the locations of all
|
||||||
/// the jump tables.
|
/// the jump tables.
|
||||||
void ARMConstantIslands::scanFunctionJumpTables() {
|
void ARMConstantIslands::scanFunctionJumpTables() {
|
||||||
for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
|
for (MachineBasicBlock &MBB : *MF) {
|
||||||
MBBI != E; ++MBBI) {
|
for (MachineInstr &I : MBB)
|
||||||
MachineBasicBlock &MBB = *MBBI;
|
if (I.isBranch() && I.getOpcode() == ARM::t2BR_JT)
|
||||||
|
T2JumpTables.push_back(&I);
|
||||||
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
|
|
||||||
I != E; ++I)
|
|
||||||
if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
|
|
||||||
T2JumpTables.push_back(I);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -756,22 +752,18 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
||||||
adjustBBOffsetsAfter(&MF->front());
|
adjustBBOffsetsAfter(&MF->front());
|
||||||
|
|
||||||
// Now go back through the instructions and build up our data structures.
|
// Now go back through the instructions and build up our data structures.
|
||||||
for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
|
for (MachineBasicBlock &MBB : *MF) {
|
||||||
MBBI != E; ++MBBI) {
|
|
||||||
MachineBasicBlock &MBB = *MBBI;
|
|
||||||
|
|
||||||
// If this block doesn't fall through into the next MBB, then this is
|
// If this block doesn't fall through into the next MBB, then this is
|
||||||
// 'water' that a constant pool island could be placed.
|
// 'water' that a constant pool island could be placed.
|
||||||
if (!BBHasFallthrough(&MBB))
|
if (!BBHasFallthrough(&MBB))
|
||||||
WaterList.push_back(&MBB);
|
WaterList.push_back(&MBB);
|
||||||
|
|
||||||
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
|
for (MachineInstr &I : MBB) {
|
||||||
I != E; ++I) {
|
if (I.isDebugValue())
|
||||||
if (I->isDebugValue())
|
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
unsigned Opc = I->getOpcode();
|
unsigned Opc = I.getOpcode();
|
||||||
if (I->isBranch()) {
|
if (I.isBranch()) {
|
||||||
bool isCond = false;
|
bool isCond = false;
|
||||||
unsigned Bits = 0;
|
unsigned Bits = 0;
|
||||||
unsigned Scale = 1;
|
unsigned Scale = 1;
|
||||||
|
@ -780,7 +772,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
||||||
default:
|
default:
|
||||||
continue; // Ignore other JT branches
|
continue; // Ignore other JT branches
|
||||||
case ARM::t2BR_JT:
|
case ARM::t2BR_JT:
|
||||||
T2JumpTables.push_back(I);
|
T2JumpTables.push_back(&I);
|
||||||
continue; // Does not get an entry in ImmBranches
|
continue; // Does not get an entry in ImmBranches
|
||||||
case ARM::Bcc:
|
case ARM::Bcc:
|
||||||
isCond = true;
|
isCond = true;
|
||||||
|
@ -814,11 +806,11 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
||||||
|
|
||||||
// Record this immediate branch.
|
// Record this immediate branch.
|
||||||
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
|
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
|
||||||
ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
|
ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
|
if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
|
||||||
PushPopMIs.push_back(I);
|
PushPopMIs.push_back(&I);
|
||||||
|
|
||||||
if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
|
if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
|
||||||
Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
|
Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
|
||||||
|
@ -826,8 +818,8 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
// Scan the instructions for constant pool operands.
|
// Scan the instructions for constant pool operands.
|
||||||
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
|
for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
|
||||||
if (I->getOperand(op).isCPI() || I->getOperand(op).isJTI()) {
|
if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
|
||||||
// We found one. The addressing mode tells us the max displacement
|
// We found one. The addressing mode tells us the max displacement
|
||||||
// from the PC that this instruction permits.
|
// from the PC that this instruction permits.
|
||||||
|
|
||||||
|
@ -886,15 +878,15 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remember that this is a user of a CP entry.
|
// Remember that this is a user of a CP entry.
|
||||||
unsigned CPI = I->getOperand(op).getIndex();
|
unsigned CPI = I.getOperand(op).getIndex();
|
||||||
if (I->getOperand(op).isJTI()) {
|
if (I.getOperand(op).isJTI()) {
|
||||||
JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
|
JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
|
||||||
CPI = JumpTableEntryIndices[CPI];
|
CPI = JumpTableEntryIndices[CPI];
|
||||||
}
|
}
|
||||||
|
|
||||||
MachineInstr *CPEMI = CPEMIs[CPI];
|
MachineInstr *CPEMI = CPEMIs[CPI];
|
||||||
unsigned MaxOffs = ((1 << Bits)-1) * Scale;
|
unsigned MaxOffs = ((1 << Bits)-1) * Scale;
|
||||||
CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
|
CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
|
||||||
|
|
||||||
// Increment corresponding CPEntry reference count.
|
// Increment corresponding CPEntry reference count.
|
||||||
CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
|
CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
|
||||||
|
@ -917,15 +909,14 @@ void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
|
||||||
BBI.Unalign = 0;
|
BBI.Unalign = 0;
|
||||||
BBI.PostAlign = 0;
|
BBI.PostAlign = 0;
|
||||||
|
|
||||||
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
|
for (MachineInstr &I : *MBB) {
|
||||||
++I) {
|
BBI.Size += TII->GetInstSizeInBytes(I);
|
||||||
BBI.Size += TII->GetInstSizeInBytes(*I);
|
|
||||||
// For inline asm, GetInstSizeInBytes returns a conservative estimate.
|
// For inline asm, GetInstSizeInBytes returns a conservative estimate.
|
||||||
// The actual size may be smaller, but still a multiple of the instr size.
|
// The actual size may be smaller, but still a multiple of the instr size.
|
||||||
if (I->isInlineAsm())
|
if (I.isInlineAsm())
|
||||||
BBI.Unalign = isThumb ? 1 : 2;
|
BBI.Unalign = isThumb ? 1 : 2;
|
||||||
// Also consider instructions that may be shrunk later.
|
// Also consider instructions that may be shrunk later.
|
||||||
else if (isThumb && mayOptimizeThumb2Instruction(I))
|
else if (isThumb && mayOptimizeThumb2Instruction(&I))
|
||||||
BBI.Unalign = 1;
|
BBI.Unalign = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1472,7 +1463,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
|
||||||
Offset < BaseInsertOffset;
|
Offset < BaseInsertOffset;
|
||||||
Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
|
Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
|
||||||
assert(MI != UserMBB->end() && "Fell off end of block");
|
assert(MI != UserMBB->end() && "Fell off end of block");
|
||||||
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
|
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
|
||||||
CPUser &U = CPUsers[CPUIndex];
|
CPUser &U = CPUsers[CPUIndex];
|
||||||
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
|
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
|
||||||
// Shift insertion point by one unit of alignment so it is within reach.
|
// Shift insertion point by one unit of alignment so it is within reach.
|
||||||
|
@ -1489,7 +1480,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
|
||||||
|
|
||||||
// Remember the last IT instruction.
|
// Remember the last IT instruction.
|
||||||
if (MI->getOpcode() == ARM::t2IT)
|
if (MI->getOpcode() == ARM::t2IT)
|
||||||
LastIT = MI;
|
LastIT = &*MI;
|
||||||
}
|
}
|
||||||
|
|
||||||
--MI;
|
--MI;
|
||||||
|
@ -1506,7 +1497,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
|
||||||
DEBUG(unsigned PredReg;
|
DEBUG(unsigned PredReg;
|
||||||
assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
|
assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
|
||||||
|
|
||||||
NewMBB = splitBlockBeforeInstr(MI);
|
NewMBB = splitBlockBeforeInstr(&*MI);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
|
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
|
||||||
|
@ -1627,7 +1618,7 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
|
||||||
CPEBB->setAlignment(0);
|
CPEBB->setAlignment(0);
|
||||||
} else
|
} else
|
||||||
// Entries are sorted by descending alignment, so realign from the front.
|
// Entries are sorted by descending alignment, so realign from the front.
|
||||||
CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
|
CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
|
||||||
|
|
||||||
adjustBBOffsetsAfter(CPEBB);
|
adjustBBOffsetsAfter(CPEBB);
|
||||||
// An island has only one predecessor BB and one successor BB. Check if
|
// An island has only one predecessor BB and one successor BB. Check if
|
||||||
|
|
|
@ -1033,7 +1033,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
|
||||||
.addReg(JumpTarget.getReg(), RegState::Kill);
|
.addReg(JumpTarget.getReg(), RegState::Kill);
|
||||||
}
|
}
|
||||||
|
|
||||||
MachineInstr *NewMI = std::prev(MBBI);
|
auto NewMI = std::prev(MBBI);
|
||||||
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
|
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
|
||||||
NewMI->addOperand(MBBI->getOperand(i));
|
NewMI->addOperand(MBBI->getOperand(i));
|
||||||
|
|
||||||
|
|
|
@ -98,23 +98,22 @@ ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
|
||||||
return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
|
return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool isCSRestore(MachineInstr *MI,
|
static bool isCSRestore(MachineInstr &MI, const ARMBaseInstrInfo &TII,
|
||||||
const ARMBaseInstrInfo &TII,
|
|
||||||
const MCPhysReg *CSRegs) {
|
const MCPhysReg *CSRegs) {
|
||||||
// Integer spill area is handled with "pop".
|
// Integer spill area is handled with "pop".
|
||||||
if (isPopOpcode(MI->getOpcode())) {
|
if (isPopOpcode(MI.getOpcode())) {
|
||||||
// The first two operands are predicates. The last two are
|
// The first two operands are predicates. The last two are
|
||||||
// imp-def and imp-use of SP. Check everything in between.
|
// imp-def and imp-use of SP. Check everything in between.
|
||||||
for (int i = 5, e = MI->getNumOperands(); i != e; ++i)
|
for (int i = 5, e = MI.getNumOperands(); i != e; ++i)
|
||||||
if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
|
if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
|
||||||
return false;
|
return false;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if ((MI->getOpcode() == ARM::LDR_POST_IMM ||
|
if ((MI.getOpcode() == ARM::LDR_POST_IMM ||
|
||||||
MI->getOpcode() == ARM::LDR_POST_REG ||
|
MI.getOpcode() == ARM::LDR_POST_REG ||
|
||||||
MI->getOpcode() == ARM::t2LDR_POST) &&
|
MI.getOpcode() == ARM::t2LDR_POST) &&
|
||||||
isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
|
isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs) &&
|
||||||
MI->getOperand(1).getReg() == ARM::SP)
|
MI.getOperand(1).getReg() == ARM::SP)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
|
@ -143,9 +142,9 @@ static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
|
||||||
MIFlags, Pred, PredReg);
|
MIFlags, Pred, PredReg);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sizeOfSPAdjustment(const MachineInstr *MI) {
|
static int sizeOfSPAdjustment(const MachineInstr &MI) {
|
||||||
int RegSize;
|
int RegSize;
|
||||||
switch (MI->getOpcode()) {
|
switch (MI.getOpcode()) {
|
||||||
case ARM::VSTMDDB_UPD:
|
case ARM::VSTMDDB_UPD:
|
||||||
RegSize = 8;
|
RegSize = 8;
|
||||||
break;
|
break;
|
||||||
|
@ -163,7 +162,7 @@ static int sizeOfSPAdjustment(const MachineInstr *MI) {
|
||||||
int count = 0;
|
int count = 0;
|
||||||
// ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
|
// ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
|
||||||
// pred) so the list starts at 4.
|
// pred) so the list starts at 4.
|
||||||
for (int i = MI->getNumOperands() - 1; i >= 4; --i)
|
for (int i = MI.getNumOperands() - 1; i >= 4; --i)
|
||||||
count += RegSize;
|
count += RegSize;
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
@ -415,7 +414,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
|
||||||
// .cfi_offset operations will reflect that.
|
// .cfi_offset operations will reflect that.
|
||||||
if (DPRGapSize) {
|
if (DPRGapSize) {
|
||||||
assert(DPRGapSize == 4 && "unexpected alignment requirements for DPRs");
|
assert(DPRGapSize == 4 && "unexpected alignment requirements for DPRs");
|
||||||
if (tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, DPRGapSize))
|
if (tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, DPRGapSize))
|
||||||
DefCFAOffsetCandidates.addExtraBytes(LastPush, DPRGapSize);
|
DefCFAOffsetCandidates.addExtraBytes(LastPush, DPRGapSize);
|
||||||
else {
|
else {
|
||||||
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRGapSize,
|
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRGapSize,
|
||||||
|
@ -429,7 +428,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
|
||||||
// Since vpush register list cannot have gaps, there may be multiple vpush
|
// Since vpush register list cannot have gaps, there may be multiple vpush
|
||||||
// instructions in the prologue.
|
// instructions in the prologue.
|
||||||
while (MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
|
while (MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
|
||||||
DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(MBBI));
|
DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
|
||||||
LastPush = MBBI++;
|
LastPush = MBBI++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -493,7 +492,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
|
||||||
if (NumBytes) {
|
if (NumBytes) {
|
||||||
// Adjust SP after all the callee-save spills.
|
// Adjust SP after all the callee-save spills.
|
||||||
if (AFI->getNumAlignedDPRCS2Regs() == 0 &&
|
if (AFI->getNumAlignedDPRCS2Regs() == 0 &&
|
||||||
tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, NumBytes))
|
tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, NumBytes))
|
||||||
DefCFAOffsetCandidates.addExtraBytes(LastPush, NumBytes);
|
DefCFAOffsetCandidates.addExtraBytes(LastPush, NumBytes);
|
||||||
else {
|
else {
|
||||||
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
|
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
|
||||||
|
@ -521,7 +520,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
|
||||||
// that push.
|
// that push.
|
||||||
if (HasFP) {
|
if (HasFP) {
|
||||||
MachineBasicBlock::iterator AfterPush = std::next(GPRCS1Push);
|
MachineBasicBlock::iterator AfterPush = std::next(GPRCS1Push);
|
||||||
unsigned PushSize = sizeOfSPAdjustment(GPRCS1Push);
|
unsigned PushSize = sizeOfSPAdjustment(*GPRCS1Push);
|
||||||
emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush,
|
emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush,
|
||||||
dl, TII, FramePtr, ARM::SP,
|
dl, TII, FramePtr, ARM::SP,
|
||||||
PushSize + FramePtrOffsetInPush,
|
PushSize + FramePtrOffsetInPush,
|
||||||
|
@ -726,8 +725,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
|
||||||
if (MBBI != MBB.begin()) {
|
if (MBBI != MBB.begin()) {
|
||||||
do {
|
do {
|
||||||
--MBBI;
|
--MBBI;
|
||||||
} while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
|
} while (MBBI != MBB.begin() && isCSRestore(*MBBI, TII, CSRegs));
|
||||||
if (!isCSRestore(MBBI, TII, CSRegs))
|
if (!isCSRestore(*MBBI, TII, CSRegs))
|
||||||
++MBBI;
|
++MBBI;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -773,8 +772,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
|
||||||
.addReg(FramePtr));
|
.addReg(FramePtr));
|
||||||
}
|
}
|
||||||
} else if (NumBytes &&
|
} else if (NumBytes &&
|
||||||
!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
|
!tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
|
||||||
emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
|
emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
|
||||||
|
|
||||||
// Increment past our save areas.
|
// Increment past our save areas.
|
||||||
if (AFI->getDPRCalleeSavedAreaSize()) {
|
if (AFI->getDPRCalleeSavedAreaSize()) {
|
||||||
|
@ -1748,9 +1747,9 @@ MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr(
|
||||||
// If we have alloca, convert as follows:
|
// If we have alloca, convert as follows:
|
||||||
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
|
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
|
||||||
// ADJCALLSTACKUP -> add, sp, sp, amount
|
// ADJCALLSTACKUP -> add, sp, sp, amount
|
||||||
MachineInstr *Old = I;
|
MachineInstr &Old = *I;
|
||||||
DebugLoc dl = Old->getDebugLoc();
|
DebugLoc dl = Old.getDebugLoc();
|
||||||
unsigned Amount = Old->getOperand(0).getImm();
|
unsigned Amount = Old.getOperand(0).getImm();
|
||||||
if (Amount != 0) {
|
if (Amount != 0) {
|
||||||
// We need to keep the stack aligned properly. To do this, we round the
|
// We need to keep the stack aligned properly. To do this, we round the
|
||||||
// amount of space needed for the outgoing arguments up to the next
|
// amount of space needed for the outgoing arguments up to the next
|
||||||
|
@ -1763,18 +1762,19 @@ MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr(
|
||||||
bool isARM = !AFI->isThumbFunction();
|
bool isARM = !AFI->isThumbFunction();
|
||||||
|
|
||||||
// Replace the pseudo instruction with a new instruction...
|
// Replace the pseudo instruction with a new instruction...
|
||||||
unsigned Opc = Old->getOpcode();
|
unsigned Opc = Old.getOpcode();
|
||||||
int PIdx = Old->findFirstPredOperandIdx();
|
int PIdx = Old.findFirstPredOperandIdx();
|
||||||
ARMCC::CondCodes Pred = (PIdx == -1)
|
ARMCC::CondCodes Pred =
|
||||||
? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
|
(PIdx == -1) ? ARMCC::AL
|
||||||
|
: (ARMCC::CondCodes)Old.getOperand(PIdx).getImm();
|
||||||
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
|
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
|
||||||
// Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
|
// Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
|
||||||
unsigned PredReg = Old->getOperand(2).getReg();
|
unsigned PredReg = Old.getOperand(2).getReg();
|
||||||
emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
|
emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
|
||||||
Pred, PredReg);
|
Pred, PredReg);
|
||||||
} else {
|
} else {
|
||||||
// Note: PredReg is operand 3 for ADJCALLSTACKUP.
|
// Note: PredReg is operand 3 for ADJCALLSTACKUP.
|
||||||
unsigned PredReg = Old->getOperand(3).getReg();
|
unsigned PredReg = Old.getOperand(3).getReg();
|
||||||
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
|
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
|
||||||
emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
|
emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
|
||||||
Pred, PredReg);
|
Pred, PredReg);
|
||||||
|
|
|
@ -8099,7 +8099,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI,
|
||||||
|
|
||||||
// Add epilogue to handle BytesLeft.
|
// Add epilogue to handle BytesLeft.
|
||||||
BB = exitMBB;
|
BB = exitMBB;
|
||||||
MachineInstr *StartOfExit = exitMBB->begin();
|
auto StartOfExit = exitMBB->begin();
|
||||||
|
|
||||||
// [scratch, srcOut] = LDRB_POST(srcLoop, 1)
|
// [scratch, srcOut] = LDRB_POST(srcLoop, 1)
|
||||||
// [destOut] = STRB_POST(scratch, destLoop, 1)
|
// [destOut] = STRB_POST(scratch, destLoop, 1)
|
||||||
|
|
|
@ -115,8 +115,8 @@ namespace {
|
||||||
MachineInstr *MI;
|
MachineInstr *MI;
|
||||||
int Offset; ///< Load/Store offset.
|
int Offset; ///< Load/Store offset.
|
||||||
unsigned Position; ///< Position as counted from end of basic block.
|
unsigned Position; ///< Position as counted from end of basic block.
|
||||||
MemOpQueueEntry(MachineInstr *MI, int Offset, unsigned Position)
|
MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
|
||||||
: MI(MI), Offset(Offset), Position(Position) {}
|
: MI(&MI), Offset(Offset), Position(Position) {}
|
||||||
};
|
};
|
||||||
typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
|
typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
|
||||||
|
|
||||||
|
@ -174,8 +174,8 @@ namespace {
|
||||||
|
|
||||||
INITIALIZE_PASS(ARMLoadStoreOpt, "arm-load-store-opt", ARM_LOAD_STORE_OPT_NAME, false, false)
|
INITIALIZE_PASS(ARMLoadStoreOpt, "arm-load-store-opt", ARM_LOAD_STORE_OPT_NAME, false, false)
|
||||||
|
|
||||||
static bool definesCPSR(const MachineInstr *MI) {
|
static bool definesCPSR(const MachineInstr &MI) {
|
||||||
for (const auto &MO : MI->operands()) {
|
for (const auto &MO : MI.operands()) {
|
||||||
if (!MO.isReg())
|
if (!MO.isReg())
|
||||||
continue;
|
continue;
|
||||||
if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
|
if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
|
||||||
|
@ -187,11 +187,11 @@ static bool definesCPSR(const MachineInstr *MI) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int getMemoryOpOffset(const MachineInstr *MI) {
|
static int getMemoryOpOffset(const MachineInstr &MI) {
|
||||||
unsigned Opcode = MI->getOpcode();
|
unsigned Opcode = MI.getOpcode();
|
||||||
bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
|
bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
|
||||||
unsigned NumOperands = MI->getDesc().getNumOperands();
|
unsigned NumOperands = MI.getDesc().getNumOperands();
|
||||||
unsigned OffField = MI->getOperand(NumOperands-3).getImm();
|
unsigned OffField = MI.getOperand(NumOperands - 3).getImm();
|
||||||
|
|
||||||
if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
|
if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
|
||||||
Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
|
Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
|
||||||
|
@ -491,7 +491,7 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
|
||||||
InsertSub = true;
|
InsertSub = true;
|
||||||
|
|
||||||
} else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
|
} else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
|
||||||
!definesCPSR(MBBI)) {
|
!definesCPSR(*MBBI)) {
|
||||||
// SUBS/ADDS using this register, with a dead def of the CPSR.
|
// SUBS/ADDS using this register, with a dead def of the CPSR.
|
||||||
// Merge it with the update; if the merged offset is too large,
|
// Merge it with the update; if the merged offset is too large,
|
||||||
// insert a new sub instead.
|
// insert a new sub instead.
|
||||||
|
@ -515,7 +515,7 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
|
||||||
InsertSub = true;
|
InsertSub = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if (definesCPSR(MBBI) || MBBI->isCall() || MBBI->isBranch()) {
|
} else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
|
||||||
// Since SUBS sets the condition flags, we can't place the base reset
|
// Since SUBS sets the condition flags, we can't place the base reset
|
||||||
// after an instruction that has a live CPSR def.
|
// after an instruction that has a live CPSR def.
|
||||||
// The base register might also contain an argument for a function call.
|
// The base register might also contain an argument for a function call.
|
||||||
|
@ -854,7 +854,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
|
||||||
MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
|
MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
|
||||||
iterator InsertBefore = std::next(iterator(LatestMI));
|
iterator InsertBefore = std::next(iterator(LatestMI));
|
||||||
MachineBasicBlock &MBB = *LatestMI->getParent();
|
MachineBasicBlock &MBB = *LatestMI->getParent();
|
||||||
unsigned Offset = getMemoryOpOffset(First);
|
unsigned Offset = getMemoryOpOffset(*First);
|
||||||
unsigned Base = getLoadStoreBaseOp(*First).getReg();
|
unsigned Base = getLoadStoreBaseOp(*First).getReg();
|
||||||
bool BaseKill = LatestMI->killsRegister(Base);
|
bool BaseKill = LatestMI->killsRegister(Base);
|
||||||
unsigned PredReg = 0;
|
unsigned PredReg = 0;
|
||||||
|
@ -1146,7 +1146,7 @@ static int isIncrementOrDecrement(const MachineInstr &MI, unsigned Reg,
|
||||||
MIPredReg != PredReg)
|
MIPredReg != PredReg)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (CheckCPSRDef && definesCPSR(&MI))
|
if (CheckCPSRDef && definesCPSR(MI))
|
||||||
return 0;
|
return 0;
|
||||||
return MI.getOperand(2).getImm() * Scale;
|
return MI.getOperand(2).getImm() * Scale;
|
||||||
}
|
}
|
||||||
|
@ -1606,7 +1606,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
|
||||||
bool BaseUndef = BaseOp.isUndef();
|
bool BaseUndef = BaseOp.isUndef();
|
||||||
bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
|
bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
|
||||||
bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
|
bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
|
||||||
int OffImm = getMemoryOpOffset(MI);
|
int OffImm = getMemoryOpOffset(*MI);
|
||||||
unsigned PredReg = 0;
|
unsigned PredReg = 0;
|
||||||
ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
|
ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
|
||||||
|
|
||||||
|
@ -1715,13 +1715,13 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
|
||||||
unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
|
unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
|
||||||
unsigned PredReg = 0;
|
unsigned PredReg = 0;
|
||||||
ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
|
ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
|
||||||
int Offset = getMemoryOpOffset(MBBI);
|
int Offset = getMemoryOpOffset(*MBBI);
|
||||||
if (CurrBase == 0) {
|
if (CurrBase == 0) {
|
||||||
// Start of a new chain.
|
// Start of a new chain.
|
||||||
CurrBase = Base;
|
CurrBase = Base;
|
||||||
CurrOpc = Opcode;
|
CurrOpc = Opcode;
|
||||||
CurrPred = Pred;
|
CurrPred = Pred;
|
||||||
MemOps.push_back(MemOpQueueEntry(MBBI, Offset, Position));
|
MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// Note: No need to match PredReg in the next if.
|
// Note: No need to match PredReg in the next if.
|
||||||
|
@ -1749,7 +1749,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
|
||||||
if (!Overlap) {
|
if (!Overlap) {
|
||||||
// Check offset and sort memory operation into the current chain.
|
// Check offset and sort memory operation into the current chain.
|
||||||
if (Offset > MemOps.back().Offset) {
|
if (Offset > MemOps.back().Offset) {
|
||||||
MemOps.push_back(MemOpQueueEntry(MBBI, Offset, Position));
|
MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
|
||||||
continue;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
MemOpQueue::iterator MI, ME;
|
MemOpQueue::iterator MI, ME;
|
||||||
|
@ -1765,7 +1765,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (MI != MemOps.end()) {
|
if (MI != MemOps.end()) {
|
||||||
MemOps.insert(MI, MemOpQueueEntry(MBBI, Offset, Position));
|
MemOps.insert(MI, MemOpQueueEntry(*MBBI, Offset, Position));
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1782,7 +1782,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
|
||||||
MBBI->getOpcode() == ARM::t2STRDi8) {
|
MBBI->getOpcode() == ARM::t2STRDi8) {
|
||||||
// ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions
|
// ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions
|
||||||
// remember them because we may still be able to merge add/sub into them.
|
// remember them because we may still be able to merge add/sub into them.
|
||||||
MergeBaseCandidates.push_back(MBBI);
|
MergeBaseCandidates.push_back(&*MBBI);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1864,20 +1864,20 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
|
||||||
// Ignore any DBG_VALUE instructions.
|
// Ignore any DBG_VALUE instructions.
|
||||||
while (PrevI->isDebugValue() && PrevI != MBB.begin())
|
while (PrevI->isDebugValue() && PrevI != MBB.begin())
|
||||||
--PrevI;
|
--PrevI;
|
||||||
MachineInstr *PrevMI = PrevI;
|
MachineInstr &PrevMI = *PrevI;
|
||||||
unsigned Opcode = PrevMI->getOpcode();
|
unsigned Opcode = PrevMI.getOpcode();
|
||||||
if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
|
if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
|
||||||
Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
|
Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
|
||||||
Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
|
Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
|
||||||
MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
|
MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1);
|
||||||
if (MO.getReg() != ARM::LR)
|
if (MO.getReg() != ARM::LR)
|
||||||
return false;
|
return false;
|
||||||
unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
|
unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
|
||||||
assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
|
assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
|
||||||
Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
|
Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
|
||||||
PrevMI->setDesc(TII->get(NewOpc));
|
PrevMI.setDesc(TII->get(NewOpc));
|
||||||
MO.setReg(ARM::PC);
|
MO.setReg(ARM::PC);
|
||||||
PrevMI->copyImplicitOps(*MBB.getParent(), *MBBI);
|
PrevMI.copyImplicitOps(*MBB.getParent(), *MBBI);
|
||||||
MBB.erase(MBBI);
|
MBB.erase(MBBI);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -2099,7 +2099,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
// Then make sure the immediate offset fits.
|
// Then make sure the immediate offset fits.
|
||||||
int OffImm = getMemoryOpOffset(Op0);
|
int OffImm = getMemoryOpOffset(*Op0);
|
||||||
if (isT2) {
|
if (isT2) {
|
||||||
int Limit = (1 << 8) * Scale;
|
int Limit = (1 << 8) * Scale;
|
||||||
if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
|
if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
|
||||||
|
@ -2135,11 +2135,11 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
|
||||||
// Sort by offset (in reverse order).
|
// Sort by offset (in reverse order).
|
||||||
std::sort(Ops.begin(), Ops.end(),
|
std::sort(Ops.begin(), Ops.end(),
|
||||||
[](const MachineInstr *LHS, const MachineInstr *RHS) {
|
[](const MachineInstr *LHS, const MachineInstr *RHS) {
|
||||||
int LOffset = getMemoryOpOffset(LHS);
|
int LOffset = getMemoryOpOffset(*LHS);
|
||||||
int ROffset = getMemoryOpOffset(RHS);
|
int ROffset = getMemoryOpOffset(*RHS);
|
||||||
assert(LHS == RHS || LOffset != ROffset);
|
assert(LHS == RHS || LOffset != ROffset);
|
||||||
return LOffset > ROffset;
|
return LOffset > ROffset;
|
||||||
});
|
});
|
||||||
|
|
||||||
// The loads / stores of the same base are in order. Scan them from first to
|
// The loads / stores of the same base are in order. Scan them from first to
|
||||||
// last and check for the following:
|
// last and check for the following:
|
||||||
|
@ -2171,7 +2171,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
|
||||||
if (LastOpcode && LSMOpcode != LastOpcode)
|
if (LastOpcode && LSMOpcode != LastOpcode)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
int Offset = getMemoryOpOffset(Op);
|
int Offset = getMemoryOpOffset(*Op);
|
||||||
unsigned Bytes = getLSMultipleTransferSize(Op);
|
unsigned Bytes = getLSMultipleTransferSize(Op);
|
||||||
if (LastBytes) {
|
if (LastBytes) {
|
||||||
if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
|
if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
|
||||||
|
@ -2206,8 +2206,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
|
||||||
} else {
|
} else {
|
||||||
// This is the new location for the loads / stores.
|
// This is the new location for the loads / stores.
|
||||||
MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
|
MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
|
||||||
while (InsertPos != MBB->end()
|
while (InsertPos != MBB->end() &&
|
||||||
&& (MemOps.count(InsertPos) || InsertPos->isDebugValue()))
|
(MemOps.count(&*InsertPos) || InsertPos->isDebugValue()))
|
||||||
++InsertPos;
|
++InsertPos;
|
||||||
|
|
||||||
// If we are moving a pair of loads / stores, see if it makes sense
|
// If we are moving a pair of loads / stores, see if it makes sense
|
||||||
|
@ -2302,25 +2302,25 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
|
||||||
MachineBasicBlock::iterator E = MBB->end();
|
MachineBasicBlock::iterator E = MBB->end();
|
||||||
while (MBBI != E) {
|
while (MBBI != E) {
|
||||||
for (; MBBI != E; ++MBBI) {
|
for (; MBBI != E; ++MBBI) {
|
||||||
MachineInstr *MI = MBBI;
|
MachineInstr &MI = *MBBI;
|
||||||
if (MI->isCall() || MI->isTerminator()) {
|
if (MI.isCall() || MI.isTerminator()) {
|
||||||
// Stop at barriers.
|
// Stop at barriers.
|
||||||
++MBBI;
|
++MBBI;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!MI->isDebugValue())
|
if (!MI.isDebugValue())
|
||||||
MI2LocMap[MI] = ++Loc;
|
MI2LocMap[&MI] = ++Loc;
|
||||||
|
|
||||||
if (!isMemoryOp(*MI))
|
if (!isMemoryOp(MI))
|
||||||
continue;
|
continue;
|
||||||
unsigned PredReg = 0;
|
unsigned PredReg = 0;
|
||||||
if (getInstrPredicate(*MI, PredReg) != ARMCC::AL)
|
if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
int Opc = MI->getOpcode();
|
int Opc = MI.getOpcode();
|
||||||
bool isLd = isLoadSingle(Opc);
|
bool isLd = isLoadSingle(Opc);
|
||||||
unsigned Base = MI->getOperand(1).getReg();
|
unsigned Base = MI.getOperand(1).getReg();
|
||||||
int Offset = getMemoryOpOffset(MI);
|
int Offset = getMemoryOpOffset(MI);
|
||||||
|
|
||||||
bool StopHere = false;
|
bool StopHere = false;
|
||||||
|
@ -2329,15 +2329,15 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
|
||||||
Base2LdsMap.find(Base);
|
Base2LdsMap.find(Base);
|
||||||
if (BI != Base2LdsMap.end()) {
|
if (BI != Base2LdsMap.end()) {
|
||||||
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
|
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
|
||||||
if (Offset == getMemoryOpOffset(BI->second[i])) {
|
if (Offset == getMemoryOpOffset(*BI->second[i])) {
|
||||||
StopHere = true;
|
StopHere = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!StopHere)
|
if (!StopHere)
|
||||||
BI->second.push_back(MI);
|
BI->second.push_back(&MI);
|
||||||
} else {
|
} else {
|
||||||
Base2LdsMap[Base].push_back(MI);
|
Base2LdsMap[Base].push_back(&MI);
|
||||||
LdBases.push_back(Base);
|
LdBases.push_back(Base);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -2345,15 +2345,15 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
|
||||||
Base2StsMap.find(Base);
|
Base2StsMap.find(Base);
|
||||||
if (BI != Base2StsMap.end()) {
|
if (BI != Base2StsMap.end()) {
|
||||||
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
|
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
|
||||||
if (Offset == getMemoryOpOffset(BI->second[i])) {
|
if (Offset == getMemoryOpOffset(*BI->second[i])) {
|
||||||
StopHere = true;
|
StopHere = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!StopHere)
|
if (!StopHere)
|
||||||
BI->second.push_back(MI);
|
BI->second.push_back(&MI);
|
||||||
} else {
|
} else {
|
||||||
Base2StsMap[Base].push_back(MI);
|
Base2StsMap[Base].push_back(&MI);
|
||||||
StBases.push_back(Base);
|
StBases.push_back(Base);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -59,9 +59,9 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
|
||||||
// If we have alloca, convert as follows:
|
// If we have alloca, convert as follows:
|
||||||
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
|
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
|
||||||
// ADJCALLSTACKUP -> add, sp, sp, amount
|
// ADJCALLSTACKUP -> add, sp, sp, amount
|
||||||
MachineInstr *Old = I;
|
MachineInstr &Old = *I;
|
||||||
DebugLoc dl = Old->getDebugLoc();
|
DebugLoc dl = Old.getDebugLoc();
|
||||||
unsigned Amount = Old->getOperand(0).getImm();
|
unsigned Amount = Old.getOperand(0).getImm();
|
||||||
if (Amount != 0) {
|
if (Amount != 0) {
|
||||||
// We need to keep the stack aligned properly. To do this, we round the
|
// We need to keep the stack aligned properly. To do this, we round the
|
||||||
// amount of space needed for the outgoing arguments up to the next
|
// amount of space needed for the outgoing arguments up to the next
|
||||||
|
@ -70,7 +70,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
|
||||||
Amount = (Amount+Align-1)/Align*Align;
|
Amount = (Amount+Align-1)/Align*Align;
|
||||||
|
|
||||||
// Replace the pseudo instruction with a new instruction...
|
// Replace the pseudo instruction with a new instruction...
|
||||||
unsigned Opc = Old->getOpcode();
|
unsigned Opc = Old.getOpcode();
|
||||||
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
|
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
|
||||||
emitSPUpdate(MBB, I, TII, dl, *RegInfo, -Amount);
|
emitSPUpdate(MBB, I, TII, dl, *RegInfo, -Amount);
|
||||||
} else {
|
} else {
|
||||||
|
@ -188,7 +188,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
|
||||||
|
|
||||||
int FramePtrOffsetInBlock = 0;
|
int FramePtrOffsetInBlock = 0;
|
||||||
unsigned adjustedGPRCS1Size = GPRCS1Size;
|
unsigned adjustedGPRCS1Size = GPRCS1Size;
|
||||||
if (tryFoldSPUpdateIntoPushPop(STI, MF, std::prev(MBBI), NumBytes)) {
|
if (tryFoldSPUpdateIntoPushPop(STI, MF, &*std::prev(MBBI), NumBytes)) {
|
||||||
FramePtrOffsetInBlock = NumBytes;
|
FramePtrOffsetInBlock = NumBytes;
|
||||||
adjustedGPRCS1Size += NumBytes;
|
adjustedGPRCS1Size += NumBytes;
|
||||||
NumBytes = 0;
|
NumBytes = 0;
|
||||||
|
@ -303,16 +303,15 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
|
||||||
AFI->setShouldRestoreSPFromFP(true);
|
AFI->setShouldRestoreSPFromFP(true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool isCSRestore(MachineInstr *MI, const MCPhysReg *CSRegs) {
|
static bool isCSRestore(MachineInstr &MI, const MCPhysReg *CSRegs) {
|
||||||
if (MI->getOpcode() == ARM::tLDRspi &&
|
if (MI.getOpcode() == ARM::tLDRspi && MI.getOperand(1).isFI() &&
|
||||||
MI->getOperand(1).isFI() &&
|
isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs))
|
||||||
isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
|
|
||||||
return true;
|
return true;
|
||||||
else if (MI->getOpcode() == ARM::tPOP) {
|
else if (MI.getOpcode() == ARM::tPOP) {
|
||||||
// The first two operands are predicates. The last two are
|
// The first two operands are predicates. The last two are
|
||||||
// imp-def and imp-use of SP. Check everything in between.
|
// imp-def and imp-use of SP. Check everything in between.
|
||||||
for (int i = 2, e = MI->getNumOperands() - 2; i != e; ++i)
|
for (int i = 2, e = MI.getNumOperands() - 2; i != e; ++i)
|
||||||
if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
|
if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
|
||||||
return false;
|
return false;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -345,8 +344,8 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
|
||||||
if (MBBI != MBB.begin()) {
|
if (MBBI != MBB.begin()) {
|
||||||
do
|
do
|
||||||
--MBBI;
|
--MBBI;
|
||||||
while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
|
while (MBBI != MBB.begin() && isCSRestore(*MBBI, CSRegs));
|
||||||
if (!isCSRestore(MBBI, CSRegs))
|
if (!isCSRestore(*MBBI, CSRegs))
|
||||||
++MBBI;
|
++MBBI;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -375,11 +374,11 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
|
||||||
.addReg(FramePtr));
|
.addReg(FramePtr));
|
||||||
} else {
|
} else {
|
||||||
if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tBX_RET &&
|
if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tBX_RET &&
|
||||||
&MBB.front() != MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
|
&MBB.front() != &*MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
|
||||||
MachineBasicBlock::iterator PMBBI = std::prev(MBBI);
|
MachineBasicBlock::iterator PMBBI = std::prev(MBBI);
|
||||||
if (!tryFoldSPUpdateIntoPushPop(STI, MF, PMBBI, NumBytes))
|
if (!tryFoldSPUpdateIntoPushPop(STI, MF, &*PMBBI, NumBytes))
|
||||||
emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
|
emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
|
||||||
} else if (!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
|
} else if (!tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
|
||||||
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
|
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue