[NFC] format InstructionSimplify & lowerCaseFunctionNames

Clang-format InstructionSimplify and convert all "FunctionName"s to
"functionName". This patch touches a lot of files, but it completes the
cleanup of InstructionSimplify in one commit.
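
As a minimal illustration of the rename (taken from one of the folder
callbacks in the first hunk below), only the first letter of the callee
changes:

  // Before:
  return SimplifyAddInst(LHS, RHS, HasNUW, HasNSW, SQ);
  // After:
  return simplifyAddInst(LHS, RHS, HasNUW, HasNSW, SQ);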

This is the alternative to the less invasive clang-format-only patch: D126783

Reviewed By: spatel, rengolin

Differential Revision: https://reviews.llvm.org/D126889
Simon Moll 2022-06-09 16:09:14 +02:00
parent 7dbfcfa735
commit b8c2781ff6
49 changed files with 643 additions and 629 deletions
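
Most call sites in the diff follow the same pattern: build a SimplifyQuery
(usually via brace initialization with the available analyses) and call one
of the lowercase entry points, replacing the instruction if a simplified
value comes back. A minimal sketch, assuming the standard
InstructionSimplify.h interface; the helper name foldIfTrivial is
hypothetical and not part of this patch:

  #include "llvm/Analysis/InstructionSimplify.h"

  using namespace llvm;

  // Hypothetical helper: returns the simplified value for I, or null if
  // InstructionSimplify finds nothing (e.g. X + 0 folds to X).
  static Value *foldIfTrivial(Instruction *I, const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC) {
    // The brace-initialized SimplifyQuery carries the analysis pointers.
    return simplifyInstruction(I, {DL, TLI, DT, AC});
  }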


@ -48,38 +48,38 @@ public:
//===--------------------------------------------------------------------===//
Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
bool HasNSW = false) const override {
return SimplifyAddInst(LHS, RHS, HasNUW, HasNSW, SQ);
return simplifyAddInst(LHS, RHS, HasNUW, HasNSW, SQ);
}
Value *FoldAnd(Value *LHS, Value *RHS) const override {
return SimplifyAndInst(LHS, RHS, SQ);
return simplifyAndInst(LHS, RHS, SQ);
}
Value *FoldOr(Value *LHS, Value *RHS) const override {
return SimplifyOrInst(LHS, RHS, SQ);
return simplifyOrInst(LHS, RHS, SQ);
}
Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
return SimplifyICmpInst(P, LHS, RHS, SQ);
return simplifyICmpInst(P, LHS, RHS, SQ);
}
Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
bool IsInBounds = false) const override {
return SimplifyGEPInst(Ty, Ptr, IdxList, IsInBounds, SQ);
return simplifyGEPInst(Ty, Ptr, IdxList, IsInBounds, SQ);
}
Value *FoldSelect(Value *C, Value *True, Value *False) const override {
return SimplifySelectInst(C, True, False, SQ);
return simplifySelectInst(C, True, False, SQ);
}
Value *FoldExtractValue(Value *Agg,
ArrayRef<unsigned> IdxList) const override {
return SimplifyExtractValueInst(Agg, IdxList, SQ);
return simplifyExtractValueInst(Agg, IdxList, SQ);
};
Value *FoldInsertValue(Value *Agg, Value *Val,
ArrayRef<unsigned> IdxList) const override {
return SimplifyInsertValueInst(Agg, Val, IdxList, SQ);
return simplifyInsertValueInst(Agg, Val, IdxList, SQ);
}
//===--------------------------------------------------------------------===//


@ -144,162 +144,162 @@ struct SimplifyQuery {
// Please use the SimplifyQuery versions in new code.
/// Given operand for an FNeg, fold the result or return null.
Value *SimplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q);
Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q);
/// Given operands for an Add, fold the result or return null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
Value *simplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
/// Given operands for a Sub, fold the result or return null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
Value *simplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
/// Given operands for an FAdd, fold the result or return null.
Value *
SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q,
fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
RoundingMode Rounding = RoundingMode::NearestTiesToEven);
/// Given operands for an FSub, fold the result or return null.
Value *
SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q,
fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
RoundingMode Rounding = RoundingMode::NearestTiesToEven);
/// Given operands for an FMul, fold the result or return null.
Value *
SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q,
fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
RoundingMode Rounding = RoundingMode::NearestTiesToEven);
/// Given operands for the multiplication of a FMA, fold the result or return
/// null. In contrast to SimplifyFMulInst, this function will not perform
/// null. In contrast to simplifyFMulInst, this function will not perform
/// simplifications whose unrounded results differ when rounded to the argument
/// type.
Value *SimplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
Value *simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q,
fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
RoundingMode Rounding = RoundingMode::NearestTiesToEven);
/// Given operands for a Mul, fold the result or return null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an SDiv, fold the result or return null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for a UDiv, fold the result or return null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an FDiv, fold the result or return null.
Value *
SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q,
fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
RoundingMode Rounding = RoundingMode::NearestTiesToEven);
/// Given operands for an SRem, fold the result or return null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for a URem, fold the result or return null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an FRem, fold the result or return null.
Value *
SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q,
fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
RoundingMode Rounding = RoundingMode::NearestTiesToEven);
/// Given operands for a Shl, fold the result or return null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *simplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
/// Given operands for a LShr, fold the result or return null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
Value *simplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
/// Given operands for a AShr, fold the result or return nulll.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
Value *simplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
/// Given operands for an And, fold the result or return null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an Or, fold the result or return null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an Xor, fold the result or return null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an ICmpInst, fold the result or return null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operands for an FCmpInst, fold the result or return null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
/// Given operands for a SelectInst, fold the result or return null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q);
/// Given operands for a GetElementPtrInst, fold the result or return null.
Value *SimplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
Value *simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
bool InBounds, const SimplifyQuery &Q);
/// Given operands for an InsertValueInst, fold the result or return null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
Value *simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
/// Given operands for an InsertElement, fold the result or return null.
Value *SimplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
Value *simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
const SimplifyQuery &Q);
/// Given operands for an ExtractValueInst, fold the result or return null.
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
/// Given operands for an ExtractElementInst, fold the result or return null.
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
const SimplifyQuery &Q);
/// Given operands for a CastInst, fold the result or return null.
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const SimplifyQuery &Q);
/// Given operands for a ShuffleVectorInst, fold the result or return null.
/// See class ShuffleVectorInst for a description of the mask representation.
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
Type *RetTy, const SimplifyQuery &Q);
//=== Helper functions for higher up the class hierarchy.
/// Given operands for a CmpInst, fold the result or return null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operand for a UnaryOperator, fold the result or return null.
Value *SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q);
Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q);
/// Given operand for a UnaryOperator, fold the result or return null.
/// Try to use FastMathFlags when folding the result.
Value *SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
Value *simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for a BinaryOperator, fold the result or return null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operands for a BinaryOperator, fold the result or return null.
/// Try to use FastMathFlags when folding the result.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, FastMathFlags FMF,
Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given a callsite, fold the result or return null.
Value *SimplifyCall(CallBase *Call, const SimplifyQuery &Q);
Value *simplifyCall(CallBase *Call, const SimplifyQuery &Q);
/// Given a constrained FP intrinsic call, tries to compute its simplified
/// version. Returns a simplified result or null.
@ -308,21 +308,21 @@ Value *SimplifyCall(CallBase *Call, const SimplifyQuery &Q);
/// simplification succeeds that the intrinsic is side effect free. As a result,
/// successful simplification can be used to delete the intrinsic not just
/// replace its result.
Value *SimplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q);
Value *simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q);
/// Given an operand for a Freeze, see if we can fold the result.
/// If not, this returns null.
Value *SimplifyFreezeInst(Value *Op, const SimplifyQuery &Q);
Value *simplifyFreezeInst(Value *Op, const SimplifyQuery &Q);
/// See if we can compute a simplified version of this instruction. If not,
/// return null.
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
Value *simplifyInstruction(Instruction *I, const SimplifyQuery &Q,
OptimizationRemarkEmitter *ORE = nullptr);
/// Like \p SimplifyInstruction but the operands of \p I are replaced with
/// Like \p simplifyInstruction but the operands of \p I are replaced with
/// \p NewOps. Returns a simplified value, or null if none was found.
Value *
SimplifyInstructionWithOperands(Instruction *I, ArrayRef<Value *> NewOps,
simplifyInstructionWithOperands(Instruction *I, ArrayRef<Value *> NewOps,
const SimplifyQuery &Q,
OptimizationRemarkEmitter *ORE = nullptr);


@ -1984,11 +1984,11 @@ bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
Value *SimpleV = nullptr;
if (auto FI = dyn_cast<FPMathOperator>(&I))
SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
SimpleV = simplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
FI->getFastMathFlags(), DL);
else
SimpleV =
SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
simplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
SimplifiedValues[&I] = C;
@ -2018,7 +2018,7 @@ bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
if (!COp)
COp = SimplifiedValues.lookup(Op);
Value *SimpleV = SimplifyFNegInst(
Value *SimpleV = simplifyFNegInst(
COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))

File diff suppressed because it is too large


@ -1012,7 +1012,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueExtractValue(
// Handle extractvalue of insertvalue to allow further simplification
// based on replaced with.overflow intrinsics.
if (Value *V = SimplifyExtractValueInst(
if (Value *V = simplifyExtractValueInst(
EVI->getAggregateOperand(), EVI->getIndices(),
EVI->getModule()->getDataLayout()))
return getBlockValue(V, BB, EVI);
@ -1270,7 +1270,7 @@ static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
if (auto *CI = dyn_cast<CastInst>(Usr)) {
assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
if (auto *C = dyn_cast_or_null<ConstantInt>(
SimplifyCastInst(CI->getOpcode(), OpConst,
simplifyCastInst(CI->getOpcode(), OpConst,
CI->getDestTy(), DL))) {
return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
}
@ -1282,7 +1282,7 @@ static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
if (auto *C = dyn_cast_or_null<ConstantInt>(
SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
}
} else if (isa<FreezeInst>(Usr)) {


@ -690,7 +690,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
if (Value *W = SimplifyInstruction(Inst, {*DL, TLI, DT, AC}))
if (Value *W = simplifyInstruction(Inst, {*DL, TLI, DT, AC}))
return findValueImpl(W, OffsetOk, Visited);
} else if (auto *C = dyn_cast<Constant>(V)) {
Value *W = ConstantFoldConstant(C, *DL, TLI);


@ -87,9 +87,9 @@ bool UnrolledInstAnalyzer::visitBinaryOperator(BinaryOperator &I) {
const DataLayout &DL = I.getModule()->getDataLayout();
if (auto FI = dyn_cast<FPMathOperator>(&I))
SimpleV =
SimplifyBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
simplifyBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
else
SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
SimpleV = simplifyBinOp(I.getOpcode(), LHS, RHS, DL);
if (SimpleV) {
SimplifiedValues[&I] = SimpleV;
@ -158,7 +158,7 @@ bool UnrolledInstAnalyzer::visitCastInst(CastInst &I) {
// i32 0).
if (CastInst::castIsValid(I.getOpcode(), Op, I.getType())) {
const DataLayout &DL = I.getModule()->getDataLayout();
if (Value *V = SimplifyCastInst(I.getOpcode(), Op, I.getType(), DL)) {
if (Value *V = simplifyCastInst(I.getOpcode(), Op, I.getType(), DL)) {
SimplifiedValues[&I] = V;
return true;
}
@ -195,7 +195,7 @@ bool UnrolledInstAnalyzer::visitCmpInst(CmpInst &I) {
}
const DataLayout &DL = I.getModule()->getDataLayout();
if (Value *V = SimplifyCmpInst(I.getPredicate(), LHS, RHS, DL)) {
if (Value *V = simplifyCmpInst(I.getPredicate(), LHS, RHS, DL)) {
SimplifiedValues[&I] = V;
return true;
}


@ -140,7 +140,7 @@ static bool CanProveNotTakenFirstIteration(const BasicBlock *ExitBlock,
return false;
auto DL = ExitBlock->getModule()->getDataLayout();
auto *IVStart = LHS->getIncomingValueForBlock(CurLoop->getLoopPreheader());
auto *SimpleValOrNull = SimplifyCmpInst(Cond->getPredicate(),
auto *SimpleValOrNull = simplifyCmpInst(Cond->getPredicate(),
IVStart, RHS,
{DL, /*TLI*/ nullptr,
DT, /*AC*/ nullptr, BI});


@ -222,7 +222,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEP;
// Simplify the GEP to handle 'gep x, 0' -> x etc.
if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(), GEPOps[0],
if (Value *V = simplifyGEPInst(GEP->getSourceElementType(), GEPOps[0],
ArrayRef<Value *>(GEPOps).slice(1),
GEP->isInBounds(), {DL, TLI, DT, AC})) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
@ -274,7 +274,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, {DL, TLI, DT, AC})) {
if (Value *Res = simplifyAddInst(LHS, RHS, isNSW, isNUW, {DL, TLI, DT, AC})) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);


@ -5931,7 +5931,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
if (Value *V = simplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
if (LI.replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);


@ -3394,7 +3394,7 @@ public:
if (!Visited.insert(P).second)
continue;
if (auto *PI = dyn_cast<Instruction>(P))
if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) {
if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
for (auto *U : PI->users())
WorkList.push_back(cast<Value>(U));
Put(PI, V);
@ -7877,7 +7877,7 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
LargeOffsetGEPMap.erase(P);
P->replaceAllUsesWith(V);
P->eraseFromParent();


@ -67,7 +67,7 @@ private:
Value *simplify(Instruction *I, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return SimplifyInstruction(I, {*TD, TLI, DT});
return simplifyInstruction(I, {*TD, TLI, DT});
}
const DataLayout *TD;


@ -1487,7 +1487,7 @@ bool PolynomialMultiplyRecognize::convertShiftsToLeft(BasicBlock *LoopB,
void PolynomialMultiplyRecognize::cleanupLoopBody(BasicBlock *LoopB) {
for (auto &I : *LoopB)
if (Value *SV = SimplifyInstruction(&I, {DL, &TLI, &DT}))
if (Value *SV = simplifyInstruction(&I, {DL, &TLI, &DT}))
I.replaceAllUsesWith(SV);
for (Instruction &I : llvm::make_early_inc_range(*LoopB))
@ -2169,7 +2169,7 @@ CleanupAndExit:
SCEV::FlagNUW);
Value *NumBytes = Expander.expandCodeFor(NumBytesS, IntPtrTy, ExpPt);
if (Instruction *In = dyn_cast<Instruction>(NumBytes))
if (Value *Simp = SimplifyInstruction(In, {*DL, TLI, DT}))
if (Value *Simp = simplifyInstruction(In, {*DL, TLI, DT}))
NumBytes = Simp;
CallInst *NewCall;
@ -2279,7 +2279,7 @@ CleanupAndExit:
Value *NumWords = Expander.expandCodeFor(NumWordsS, Int32Ty,
MemmoveB->getTerminator());
if (Instruction *In = dyn_cast<Instruction>(NumWords))
if (Value *Simp = SimplifyInstruction(In, {*DL, TLI, DT}))
if (Value *Simp = simplifyInstruction(In, {*DL, TLI, DT}))
NumWords = Simp;
Value *Op0 = (StoreBasePtr->getType() == Int32PtrTy)


@ -1310,7 +1310,7 @@ auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
auto Simplify = [&](Value *V) {
if (auto *I = dyn_cast<Instruction>(V)) {
SimplifyQuery Q(DL, &TLI, &DT, &AC, I);
if (Value *S = SimplifyInstruction(I, Q))
if (Value *S = simplifyInstruction(I, Q))
return S;
}
return V;


@ -5851,7 +5851,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
const DataLayout &DL = I.getModule()->getDataLayout();
SimplifyQuery Q(DL, TLI, DT, AC, &I);
if (Value *SimplifiedI =
SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
SimplifiedAssociatedValue, SimplifiedI, I.getType());
return SimplifiedAssociatedValue != Optional<Value *>(nullptr);


@ -1271,7 +1271,7 @@ static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
}
Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1529,7 +1529,7 @@ static Instruction *factorizeFAddFSub(BinaryOperator &I,
}
Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
if (Value *V = SimplifyFAddInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
I.getFastMathFlags(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1751,7 +1751,7 @@ Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
}
Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
if (Value *V = SimplifySubInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -2278,7 +2278,7 @@ static Instruction *hoistFNegAboveFMulFDiv(Instruction &I,
Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
Value *Op = I.getOperand(0);
if (Value *V = SimplifyFNegInst(Op, I.getFastMathFlags(),
if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
getSimplifyQuery().getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -2329,7 +2329,7 @@ Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
}
Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
if (Value *V = SimplifyFSubInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
I.getFastMathFlags(),
getSimplifyQuery().getWithInstruction(&I)))
return replaceInstUsesWith(I, V);


@ -1122,7 +1122,7 @@ static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
// (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
// Can think of the 'or' substitution with the 'and' bool equivalent:
// A || B --> A || (!A && B)
Value *SubstituteCmp = SimplifyICmpInst(Pred1, Y, C, Q);
Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
if (!SubstituteCmp) {
// If we need to create a new instruction, require that the old compare can
// be removed.
@ -1728,7 +1728,7 @@ static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
Type *Ty = I.getType();
if (Value *V = SimplifyAndInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -2570,7 +2570,7 @@ Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
if (Value *V = SimplifyOrInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -3101,10 +3101,10 @@ Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
//
// This is based on a truth table definition of xor:
// X ^ Y --> (X | Y) & !(X & Y)
if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
// TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
// TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
// TODO: Independently handle cases where the 'and' side is a constant.
ICmpInst *X = nullptr, *Y = nullptr;
if (OrICmp == LHS && AndICmp == RHS) {
@ -3471,7 +3471,7 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
if (Value *V = SimplifyXorInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);


@ -1137,7 +1137,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Don't try to simplify calls without uses. It will not do anything useful,
// but will result in the following folds being skipped.
if (!CI.use_empty())
if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
if (Value *V = simplifyCall(&CI, SQ.getWithInstruction(&CI)))
return replaceInstUsesWith(CI, V);
if (isFreeCall(&CI, &TLI))
@ -1242,7 +1242,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// actually absent. To detect this case, call SimplifyConstrainedFPCall. If it
// returns a replacement, the call may be removed.
if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
if (SimplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
return eraseInstFromFunction(CI);
}
@ -1840,7 +1840,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
// Try to simplify the underlying FMul.
if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
II->getFastMathFlags(),
SQ.getWithInstruction(II))) {
auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
@ -1871,7 +1871,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Try to simplify the underlying FMul. We can only apply simplifications
// that do not require rounding.
if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
II->getFastMathFlags(),
SQ.getWithInstruction(II))) {
auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));


@ -3496,7 +3496,7 @@ Instruction *InstCombinerImpl::foldSelectICmp(ICmpInst::Predicate Pred,
// Try to fold the comparison into the select arms, which will cause the
// select to be converted into a logical and/or.
auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
if (Value *Res = SimplifyICmpInst(Pred, Op, RHS, SQ))
if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
return Res;
if (Optional<bool> Impl = isImpliedCondition(SI->getCondition(), Pred, Op,
RHS, DL, SelectCondIsTrue))
@ -3812,7 +3812,7 @@ foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
// Can we fold (XShAmt+YShAmt) ?
auto *NewShAmt = dyn_cast_or_null<Constant>(
SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
simplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
/*isNUW=*/false, SQ.getWithInstruction(&I)));
if (!NewShAmt)
return nullptr;
@ -4836,7 +4836,7 @@ Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
// or could not be determined to be equal (in the case of a constant
// expression), so the constant cannot be represented in the shorter type.
// All the cases that fold to true or false will have already been handled
// by SimplifyICmpInst, so only deal with the tricky case.
// by simplifyICmpInst, so only deal with the tricky case.
if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
return nullptr;
@ -6064,7 +6064,7 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
Changed = true;
}
if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
if (Value *V = simplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
return replaceInstUsesWith(I, V);
// Comparing -val or val with non-zero is the same as just comparing val
@ -6687,7 +6687,7 @@ Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
const CmpInst::Predicate Pred = I.getPredicate();
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
if (Value *V = simplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);


@ -140,7 +140,7 @@ static Value *foldMulSelectToNegate(BinaryOperator &I,
}
Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyMulInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -460,7 +460,7 @@ Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyFMulInst(I.getOperand(0), I.getOperand(1),
I.getFastMathFlags(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1020,7 +1020,7 @@ static Instruction *narrowUDivURem(BinaryOperator &I,
}
Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyUDivInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1090,7 +1090,7 @@ Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifySDivInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1321,7 +1321,7 @@ static Instruction *foldFDivPowDivisor(BinaryOperator &I,
Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
Module *M = I.getModule();
if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyFDivInst(I.getOperand(0), I.getOperand(1),
I.getFastMathFlags(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1484,7 +1484,7 @@ Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyURemInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1537,7 +1537,7 @@ Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifySRemInst(I.getOperand(0), I.getOperand(1),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1609,7 +1609,7 @@ Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyFRemInst(I.getOperand(0), I.getOperand(1),
I.getFastMathFlags(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);


@ -1363,7 +1363,7 @@ static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
// PHINode simplification
//
Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
if (Value *V = SimplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
return replaceInstUsesWith(PN, V);
if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))


@ -2644,7 +2644,7 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
Value *FalseVal = SI.getFalseValue();
Type *SelType = SI.getType();
if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal,
if (Value *V = simplifySelectInst(CondVal, TrueVal, FalseVal,
SQ.getWithInstruction(&SI)))
return replaceInstUsesWith(SI, V);
@ -3190,7 +3190,7 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
// between the load and select masks.
// (i.e (load_mask & select_mask) == 0 == no overlap)
bool CanMergeSelectIntoLoad = false;
if (Value *V = SimplifyAndInst(CondVal, Mask, SQ.getWithInstruction(&SI)))
if (Value *V = simplifyAndInst(CondVal, Mask, SQ.getWithInstruction(&SI)))
CanMergeSelectIntoLoad = match(V, m_Zero());
if (CanMergeSelectIntoLoad) {


@ -107,7 +107,7 @@ Value *InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts(
// Can we fold (ShAmt0+ShAmt1) ?
auto *NewShAmt = dyn_cast_or_null<Constant>(
SimplifyAddInst(ShAmt0, ShAmt1, /*isNSW=*/false, /*isNUW=*/false,
simplifyAddInst(ShAmt0, ShAmt1, /*isNSW=*/false, /*isNUW=*/false,
SQ.getWithInstruction(Sh0)));
if (!NewShAmt)
return nullptr; // Did not simplify.
@ -231,7 +231,7 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
return nullptr;
// Can we simplify (MaskShAmt+ShiftShAmt) ?
auto *SumOfShAmts = dyn_cast_or_null<Constant>(SimplifyAddInst(
auto *SumOfShAmts = dyn_cast_or_null<Constant>(simplifyAddInst(
MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
if (!SumOfShAmts)
return nullptr; // Did not simplify.
@ -263,7 +263,7 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
return nullptr;
// Can we simplify (ShiftShAmt-MaskShAmt) ?
auto *ShAmtsDiff = dyn_cast_or_null<Constant>(SimplifySubInst(
auto *ShAmtsDiff = dyn_cast_or_null<Constant>(simplifySubInst(
ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
if (!ShAmtsDiff)
return nullptr; // Did not simplify.
@ -811,7 +811,7 @@ Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *C1,
Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
const SimplifyQuery Q = SQ.getWithInstruction(&I);
if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
if (Value *V = simplifyShlInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), Q))
return replaceInstUsesWith(I, V);
@ -1038,7 +1038,7 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
if (Value *V = simplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
@ -1347,7 +1347,7 @@ InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract(
}
Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
if (Value *V = simplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);


@ -377,7 +377,7 @@ ConstantInt *getPreferredVectorIndex(ConstantInt *IndexC) {
Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
Value *SrcVec = EI.getVectorOperand();
Value *Index = EI.getIndexOperand();
if (Value *V = SimplifyExtractElementInst(SrcVec, Index,
if (Value *V = simplifyExtractElementInst(SrcVec, Index,
SQ.getWithInstruction(&EI)))
return replaceInstUsesWith(EI, V);
@ -1488,7 +1488,7 @@ Instruction *InstCombinerImpl::visitInsertElementInst(InsertElementInst &IE) {
Value *ScalarOp = IE.getOperand(1);
Value *IdxOp = IE.getOperand(2);
if (auto *V = SimplifyInsertElementInst(
if (auto *V = simplifyInsertElementInst(
VecOp, ScalarOp, IdxOp, SQ.getWithInstruction(&IE)))
return replaceInstUsesWith(IE, V);
@ -2533,7 +2533,7 @@ Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Value *LHS = SVI.getOperand(0);
Value *RHS = SVI.getOperand(1);
SimplifyQuery ShufQuery = SQ.getWithInstruction(&SVI);
if (auto *V = SimplifyShuffleVectorInst(LHS, RHS, SVI.getShuffleMask(),
if (auto *V = simplifyShuffleVectorInst(LHS, RHS, SVI.getShuffleMask(),
SVI.getType(), ShufQuery))
return replaceInstUsesWith(SVI, V);
@ -2588,7 +2588,7 @@ Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
if (!ScaledMask.empty()) {
// If the shuffled source vector simplifies, cast that value to this
// shuffle's type.
if (auto *V = SimplifyShuffleVectorInst(X, UndefValue::get(XType),
if (auto *V = simplifyShuffleVectorInst(X, UndefValue::get(XType),
ScaledMask, XType, ShufQuery))
return BitCastInst::Create(Instruction::BitCast, V, SVI.getType());
}


@ -426,7 +426,7 @@ bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = I.getOperand(1);
// Does "B op C" simplify?
if (Value *V = SimplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
// It simplifies to V. Form "A op V".
replaceOperand(I, 0, A);
replaceOperand(I, 1, V);
@ -459,7 +459,7 @@ bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = Op1->getOperand(1);
// Does "A op B" simplify?
if (Value *V = SimplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
// It simplifies to V. Form "V op C".
replaceOperand(I, 0, V);
replaceOperand(I, 1, C);
@ -487,7 +487,7 @@ bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = I.getOperand(1);
// Does "C op A" simplify?
if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
// It simplifies to V. Form "V op B".
replaceOperand(I, 0, V);
replaceOperand(I, 1, B);
@ -507,7 +507,7 @@ bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = Op1->getOperand(1);
// Does "C op A" simplify?
if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
// It simplifies to V. Form "B op V".
replaceOperand(I, 0, B);
replaceOperand(I, 1, V);
@ -654,7 +654,7 @@ Value *InstCombinerImpl::tryFactorization(BinaryOperator &I,
std::swap(C, D);
// Consider forming "A op' (B op D)".
// If "B op D" simplifies then it can be formed with no cost.
V = SimplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
// If "B op D" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && LHS->hasOneUse() && RHS->hasOneUse())
@ -673,7 +673,7 @@ Value *InstCombinerImpl::tryFactorization(BinaryOperator &I,
std::swap(C, D);
// Consider forming "(A op C) op' B".
// If "A op C" simplifies then it can be formed with no cost.
V = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
// If "A op C" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used.
@ -782,8 +782,8 @@ Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
// Disable the use of undef because it's not safe to distribute undef.
auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
Value *L = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
Value *R = SimplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
// Do "A op C" and "B op C" both simplify?
if (L && R) {
@ -821,8 +821,8 @@ Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
// Disable the use of undef because it's not safe to distribute undef.
auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
Value *L = SimplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
Value *R = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
// Do "A op B" and "A op C" both simplify?
if (L && R) {
@ -878,8 +878,8 @@ Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
if (LHSIsSelect && RHSIsSelect && A == D) {
// (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
Cond = A;
True = SimplifyBinOp(Opcode, B, E, FMF, Q);
False = SimplifyBinOp(Opcode, C, F, FMF, Q);
True = simplifyBinOp(Opcode, B, E, FMF, Q);
False = simplifyBinOp(Opcode, C, F, FMF, Q);
if (LHS->hasOneUse() && RHS->hasOneUse()) {
if (False && !True)
@ -890,13 +890,13 @@ Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
} else if (LHSIsSelect && LHS->hasOneUse()) {
// (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
Cond = A;
True = SimplifyBinOp(Opcode, B, RHS, FMF, Q);
False = SimplifyBinOp(Opcode, C, RHS, FMF, Q);
True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
} else if (RHSIsSelect && RHS->hasOneUse()) {
// X op (D ? E : F) -> D ? (X op E) : (X op F)
Cond = D;
True = SimplifyBinOp(Opcode, LHS, E, FMF, Q);
False = SimplifyBinOp(Opcode, LHS, F, FMF, Q);
True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
}
if (!True || !False)
@ -2088,7 +2088,7 @@ Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
return nullptr;
Value *Sum =
SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
// Only do the combine when we are sure the cost after the
// merge is never more than that before the merge.
if (Sum == nullptr)
@ -2235,7 +2235,7 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Type *GEPType = GEP.getType();
Type *GEPEltType = GEP.getSourceElementType();
bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
if (Value *V = SimplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
if (Value *V = simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
SQ.getWithInstruction(&GEP)))
return replaceInstUsesWith(GEP, V);
@ -3224,7 +3224,7 @@ Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
if (!EV.hasIndices())
return replaceInstUsesWith(EV, Agg);
if (Value *V = SimplifyExtractValueInst(Agg, EV.getIndices(),
if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
SQ.getWithInstruction(&EV)))
return replaceInstUsesWith(EV, V);
@ -3818,7 +3818,7 @@ bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
Value *Op0 = I.getOperand(0);
if (Value *V = SimplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
// freeze (phi const, x) --> phi const, (freeze x)


@ -276,7 +276,7 @@ static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
}
}
if (Value *V = SimplifyInstruction(P, SQ)) {
if (Value *V = simplifyInstruction(P, SQ)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
Changed = true;


@ -1347,7 +1347,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
if (Value *V = SimplifyInstruction(&Inst, SQ)) {
if (Value *V = simplifyInstruction(&Inst, SQ)) {
LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << " to: " << *V
<< '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {


@ -2455,7 +2455,7 @@ bool GVNPass::processInstruction(Instruction *I) {
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
const DataLayout &DL = I->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
bool Changed = false;
if (!I->use_empty()) {
// Simplification can cause a special instruction to become not special.


@ -51,7 +51,7 @@ static bool runImpl(Function &F, const SimplifyQuery &SQ,
DeadInstsInBB.push_back(&I);
Changed = true;
} else if (!I.use_empty()) {
if (Value *V = SimplifyInstruction(&I, SQ, ORE)) {
if (Value *V = simplifyInstruction(&I, SQ, ORE)) {
// Mark all uses for resimplification next time round the loop.
for (User *U : I.users())
Next->insert(cast<Instruction>(U));


@ -831,7 +831,7 @@ bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
LHS = CmpLHS->DoPHITranslation(BB, PredBB);
RHS = PN->getIncomingValue(i);
}
Value *Res = SimplifyCmpInst(Pred, LHS, RHS, {DL});
Value *Res = simplifyCmpInst(Pred, LHS, RHS, {DL});
if (!Res) {
if (!isa<Constant>(RHS))
continue;
@ -2662,7 +2662,7 @@ bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred(
// If this instruction can be simplified after the operands are updated,
// just use the simplified value instead. This frequently happens due to
// phi translation.
if (Value *IV = SimplifyInstruction(
if (Value *IV = simplifyInstruction(
New,
{BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) {
ValueMapping[&*BI] = IV;


@ -192,13 +192,13 @@ getValueOnFirstIteration(Value *V, DenseMap<Value *, Value *> &FirstIterValue,
getValueOnFirstIteration(BO->getOperand(0), FirstIterValue, SQ);
Value *RHS =
getValueOnFirstIteration(BO->getOperand(1), FirstIterValue, SQ);
FirstIterV = SimplifyBinOp(BO->getOpcode(), LHS, RHS, SQ);
FirstIterV = simplifyBinOp(BO->getOpcode(), LHS, RHS, SQ);
} else if (auto *Cmp = dyn_cast<ICmpInst>(V)) {
Value *LHS =
getValueOnFirstIteration(Cmp->getOperand(0), FirstIterValue, SQ);
Value *RHS =
getValueOnFirstIteration(Cmp->getOperand(1), FirstIterValue, SQ);
FirstIterV = SimplifyICmpInst(Cmp->getPredicate(), LHS, RHS, SQ);
FirstIterV = simplifyICmpInst(Cmp->getPredicate(), LHS, RHS, SQ);
} else if (auto *Select = dyn_cast<SelectInst>(V)) {
Value *Cond =
getValueOnFirstIteration(Select->getCondition(), FirstIterValue, SQ);


@ -96,7 +96,7 @@ static bool simplifyLoopInst(Loop &L, DominatorTree &DT, LoopInfo &LI,
if (!IsFirstIteration && !ToSimplify->count(&I))
continue;
Value *V = SimplifyInstruction(&I, SQ.getWithInstruction(&I));
Value *V = simplifyInstruction(&I, SQ.getWithInstruction(&I));
if (!V || !LI.replacementPreservesLCSSAForm(&I, V))
continue;


@ -1089,7 +1089,7 @@ const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
E->op_push_back(lookupOperandLeader(Arg1));
E->op_push_back(lookupOperandLeader(Arg2));
Value *V = SimplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ);
Value *V = simplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ);
if (auto Simplified = checkExprResults(E, I, V)) {
addAdditionalUsers(Simplified, I);
return Simplified.Expr;
@ -1167,13 +1167,13 @@ NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
Predicate = CmpInst::getSwappedPredicate(Predicate);
}
E->setOpcode((CI->getOpcode() << 8) | Predicate);
// TODO: 25% of our time is spent in SimplifyCmpInst with pointer operands
// TODO: 25% of our time is spent in simplifyCmpInst with pointer operands
assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
"Wrong types on cmp instruction");
assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
E->getOperand(1)->getType() == I->getOperand(1)->getType()));
Value *V =
SimplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), SQ);
simplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), SQ);
if (auto Simplified = checkExprResults(E, I, V))
return Simplified;
} else if (isa<SelectInst>(I)) {
@ -1181,24 +1181,24 @@ NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
E->getOperand(1) == E->getOperand(2)) {
assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
E->getOperand(2)->getType() == I->getOperand(2)->getType());
Value *V = SimplifySelectInst(E->getOperand(0), E->getOperand(1),
Value *V = simplifySelectInst(E->getOperand(0), E->getOperand(1),
E->getOperand(2), SQ);
if (auto Simplified = checkExprResults(E, I, V))
return Simplified;
}
} else if (I->isBinaryOp()) {
Value *V =
SimplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), SQ);
simplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), SQ);
if (auto Simplified = checkExprResults(E, I, V))
return Simplified;
} else if (auto *CI = dyn_cast<CastInst>(I)) {
Value *V =
SimplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), SQ);
simplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), SQ);
if (auto Simplified = checkExprResults(E, I, V))
return Simplified;
} else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
Value *V =
SimplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
simplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
makeArrayRef(std::next(E->op_begin()), E->op_end()),
GEPI->isInBounds(), SQ);
if (auto Simplified = checkExprResults(E, I, V))


@ -1012,7 +1012,7 @@ private:
I.getParent()->getFirstInsertionPt() == I.getParent()->end())
return PI.setAborted(&I);
// TODO: We could use SimplifyInstruction here to fold PHINodes and
// TODO: We could use simplifyInstruction here to fold PHINodes and
// SelectInsts. However, doing so requires to change the current
// dead-operand-tracking mechanism. For instance, suppose neither loading
// from %U nor %other traps. Then "load (select undef, %U, %other)" does not


@ -681,7 +681,7 @@ void StructurizeCFG::simplifyAffectedPhis() {
Q.DT = DT;
for (WeakVH VH : AffectedPhis) {
if (auto Phi = dyn_cast_or_null<PHINode>(VH)) {
if (auto NewValue = SimplifyInstruction(Phi, Q)) {
if (auto NewValue = simplifyInstruction(Phi, Q)) {
Phi->replaceAllUsesWith(NewValue);
Phi->eraseFromParent();
Changed = true;


@ -729,7 +729,7 @@ void TailRecursionEliminator::cleanupAndFinalize() {
// call.
for (PHINode *PN : ArgumentPHIs) {
// If the PHI Node is a dynamic constant, replace it with the value it is.
if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) {
if (Value *PNV = simplifyInstruction(PN, F.getParent()->getDataLayout())) {
PN->replaceAllUsesWith(PNV);
PN->eraseFromParent();
}


@ -485,7 +485,7 @@ void PruningFunctionCloner::CloneBlock(
// a mapping to that value rather than inserting a new instruction into
// the basic block.
if (Value *V =
SimplifyInstruction(NewInst, BB->getModule()->getDataLayout())) {
simplifyInstruction(NewInst, BB->getModule()->getDataLayout())) {
// On the off-chance that this simplifies to an instruction in the old
// function, map it back into the new function.
if (NewFunc != OldFunc)
@ -768,7 +768,7 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
continue;
// See if this instruction simplifies.
Value *SimpleV = SimplifyInstruction(I, DL);
Value *SimpleV = simplifyInstruction(I, DL);
if (!SimpleV)
continue;


@ -2650,7 +2650,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
AssumptionCache *AC =
IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
auto &DL = Caller->getParent()->getDataLayout();
if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
}


@ -675,7 +675,7 @@ simplifyAndDCEInstruction(Instruction *I,
return true;
}
if (Value *SimpleV = SimplifyInstruction(I, DL)) {
if (Value *SimpleV = simplifyInstruction(I, DL)) {
// Add the users to the worklist. CAREFUL: an instruction can use itself,
// in the case of a phi node.
for (User *U : I->users()) {


@ -439,7 +439,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// With the operands remapped, see if the instruction constant folds or is
// otherwise simplifyable. This commonly occurs because the entry from PHI
// nodes allows icmps and other instructions to fold.
Value *V = SimplifyInstruction(C, SQ);
Value *V = simplifyInstruction(C, SQ);
if (V && LI->replacementPreservesLCSSAForm(C, V)) {
// If so, then delete the temporary instruction and stick the folded value
// in the map.


@ -176,7 +176,7 @@ static PHINode *findPHIToPartitionLoops(Loop *L, DominatorTree *DT,
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I);
++I;
if (Value *V = SimplifyInstruction(PN, {DL, nullptr, DT, AC})) {
if (Value *V = simplifyInstruction(PN, {DL, nullptr, DT, AC})) {
// This is a degenerate PHI already, don't modify it!
PN->replaceAllUsesWith(V);
PN->eraseFromParent();
@ -597,7 +597,7 @@ ReprocessLoop:
PHINode *PN;
for (BasicBlock::iterator I = L->getHeader()->begin();
(PN = dyn_cast<PHINode>(I++)); )
if (Value *V = SimplifyInstruction(PN, {DL, nullptr, DT, AC})) {
if (Value *V = simplifyInstruction(PN, {DL, nullptr, DT, AC})) {
if (SE) SE->forgetValue(PN);
if (!PreserveLCSSA || LI->replacementPreservesLCSSAForm(PN, V)) {
PN->replaceAllUsesWith(V);


@ -236,7 +236,7 @@ void llvm::simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI,
SmallVector<WeakTrackingVH, 16> DeadInsts;
for (BasicBlock *BB : L->getBlocks()) {
for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
if (Value *V = SimplifyInstruction(&Inst, {DL, nullptr, DT, AC}))
if (Value *V = simplifyInstruction(&Inst, {DL, nullptr, DT, AC}))
if (LI->replacementPreservesLCSSAForm(&Inst, V))
Inst.replaceAllUsesWith(V);
if (isInstructionTriviallyDead(&Inst))


@ -957,7 +957,7 @@ bool llvm::UnrollRuntimeLoopRemainder(
SmallVector<WeakTrackingVH, 16> DeadInsts;
for (BasicBlock *BB : RemainderBlocks) {
for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
if (Value *V = SimplifyInstruction(&Inst, {DL, nullptr, DT, AC}))
if (Value *V = simplifyInstruction(&Inst, {DL, nullptr, DT, AC}))
if (LI->replacementPreservesLCSSAForm(&Inst, V))
Inst.replaceAllUsesWith(V);
if (isInstructionTriviallyDead(&Inst))


@ -702,7 +702,7 @@ void PromoteMem2Reg::run() {
PHINode *PN = I->second;
// If this PHI node merges one value and/or undefs, get the value.
if (Value *V = SimplifyInstruction(PN, SQ)) {
if (Value *V = simplifyInstruction(PN, SQ)) {
PN->replaceAllUsesWith(V);
PN->eraseFromParent();
NewPhiNodes.erase(I++);


@ -996,7 +996,7 @@ void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
if ((V1State.isConstant() || V2State.isConstant())) {
Value *V1 = isConstant(V1State) ? getConstant(V1State) : I.getOperand(0);
Value *V2 = isConstant(V2State) ? getConstant(V2State) : I.getOperand(1);
Value *R = SimplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL));
Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL));
auto *C = dyn_cast_or_null<Constant>(R);
if (C) {
// X op Y -> undef.


@ -165,7 +165,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// See if the PHI node can be merged to a single value. This can happen in
// loop cases when we get a PHI of itself and one other value.
if (Value *V =
SimplifyInstruction(InsertedPHI, BB->getModule()->getDataLayout())) {
simplifyInstruction(InsertedPHI, BB->getModule()->getDataLayout())) {
InsertedPHI->eraseFromParent();
return V;
}


@ -1932,7 +1932,7 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// so narrow phis can reuse them.
for (PHINode *Phi : Phis) {
auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
return V;
if (!SE.isSCEVable(PN->getType()))
return nullptr;


@ -3103,7 +3103,7 @@ FoldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
}
// Check for trivial simplification.
if (Value *V = SimplifyInstruction(N, {DL, nullptr, nullptr, AC})) {
if (Value *V = simplifyInstruction(N, {DL, nullptr, nullptr, AC})) {
if (!BBI->use_empty())
TranslateMap[&*BBI] = V;
if (!N->mayHaveSideEffects()) {
@ -3244,7 +3244,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI,
bool Changed = false;
for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
PHINode *PN = cast<PHINode>(II++);
if (Value *V = SimplifyInstruction(PN, {DL, PN})) {
if (Value *V = simplifyInstruction(PN, {DL, PN})) {
PN->replaceAllUsesWith(V);
PN->eraseFromParent();
Changed = true;
@ -4489,7 +4489,7 @@ bool SimplifyCFGOpt::tryToSimplifyUncondBranchWithICmpInIt(
assert(VVal && "Should have a unique destination value");
ICI->setOperand(0, VVal);
if (Value *V = SimplifyInstruction(ICI, {DL, ICI})) {
if (Value *V = simplifyInstruction(ICI, {DL, ICI})) {
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
}


@ -596,9 +596,9 @@ TEST(Local, SimplifyVScaleWithRange) {
Function *VScale = Intrinsic::getDeclaration(&M, Intrinsic::vscale, {Ty});
auto *CI = CallInst::Create(VScale, {}, "vscale");
// Test that SimplifyCall won't try to query it's parent function for
// Test that simplifyCall won't try to query it's parent function for
// vscale_range attributes in order to simplify llvm.vscale -> constant.
EXPECT_EQ(SimplifyCall(CI, SimplifyQuery(M.getDataLayout())), nullptr);
EXPECT_EQ(simplifyCall(CI, SimplifyQuery(M.getDataLayout())), nullptr);
delete CI;
}