[llvm] Don't use Optional::getValue (NFC)
commit 7a47ee51a1
parent 9cfbe7bbfe
@@ -80,7 +80,7 @@ private:

  inline void toNext() {
    Optional<QueueElement> Head = VisitQueue.front();
-    QueueElement H = Head.getValue();
+    QueueElement H = *Head;
    NodeRef Node = H.first;
    Optional<ChildItTy> &ChildIt = H.second;

@@ -1299,7 +1299,7 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
      auto &HeaderNode = Loop.Nodes[H];
      assert(!getBlock(HeaderNode)->getIrrLoopHeaderWeight() &&
             "Shouldn't have a weight metadata");
-      uint64_t MinWeight = MinHeaderWeight.getValue();
+      uint64_t MinWeight = *MinHeaderWeight;
      LLVM_DEBUG(dbgs() << "Giving weight " << MinWeight << " to "
                        << getBlockName(HeaderNode) << "\n");
      if (MinWeight)

@@ -1869,7 +1869,7 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
    case GVDT_Count: {
      auto Count = Graph->getBlockProfileCount(Node);
      if (Count)
-        OS << Count.getValue();
+        OS << *Count;
      else
        OS << "Unknown";
      break;

@@ -1307,8 +1307,7 @@ public:

      // Scale the cost of the load by the fraction of legal instructions that
      // will be used.
-      Cost = divideCeil(UsedInsts.count() * Cost.getValue().getValue(),
-                        NumLegalInsts);
+      Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
    }

    // Then plus the cost of interleave operation.

@@ -196,7 +196,7 @@ struct BinaryAnnotationIterator

  const DecodedAnnotation &operator*() {
    ParseCurrentAnnotation();
-    return Current.getValue();
+    return *Current;
  }

private:

@@ -181,7 +181,7 @@ Error iterateSymbolGroups(InputFile &Input, const PrintScope &HeaderScope,

  FilterOptions Filters = HeaderScope.P.getFilters();
  if (Filters.DumpModi) {
-    uint32_t Modi = Filters.DumpModi.getValue();
+    uint32_t Modi = *Filters.DumpModi;
    SymbolGroup SG(&Input, Modi);
    return iterateOneModule(Input, withLabelWidth(HeaderScope, NumDigits(Modi)),
                            SG, Modi, Callback);

@@ -1692,7 +1692,7 @@ void IO::processKeyWithDefault(const char *Key, Optional<T> &Val,
      if (IsNone)
        Val = DefaultValue;
      else
-        yamlize(*this, Val.getValue(), Required, Ctx);
+        yamlize(*this, *Val, Required, Ctx);
      this->postflightKey(SaveInfo);
    } else {
      if (UseDefault)

@@ -414,8 +414,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
    const LoopBlock DstLoopBB = getLoopBlock(TI->getSuccessor(I - 1));
    auto EstimatedWeight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});
    if (EstimatedWeight &&
-        EstimatedWeight.getValue() <=
-            static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
+        *EstimatedWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
      UnreachableIdxs.push_back(I - 1);
    else
      ReachableIdxs.push_back(I - 1);
@@ -688,7 +687,7 @@ Optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
    if (!Weight)
      return None;

-    if (!MaxWeight || MaxWeight.getValue() < Weight.getValue())
+    if (!MaxWeight || *MaxWeight < *Weight)
      MaxWeight = Weight;
  }

@@ -852,8 +851,7 @@ void BranchProbabilityInfo::computeEestimateBlockWeight(
      if (LoopWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
        LoopWeight = static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);

-      EstimatedLoopWeight.insert(
-          {LoopBB.getLoopData(), LoopWeight.getValue()});
+      EstimatedLoopWeight.insert({LoopBB.getLoopData(), *LoopWeight});
      // Add all blocks entering the loop into working list.
      getLoopEnterBlocks(LoopBB, BlockWorkList);
    }

@@ -875,7 +873,7 @@ void BranchProbabilityInfo::computeEestimateBlockWeight(
      auto MaxWeight = getMaxEstimatedEdgeWeight(LoopBB, successors(BB));

      if (MaxWeight)
-        propagateEstimatedBlockWeight(LoopBB, DT, PDT, MaxWeight.getValue(),
+        propagateEstimatedBlockWeight(LoopBB, DT, PDT, *MaxWeight,
                                      BlockWorkList, LoopWorkList);
    }
  } while (!BlockWorkList.empty() || !LoopWorkList.empty());

@@ -2056,7 +2056,7 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
-      if (!RM || RM.getValue() == RoundingMode::Dynamic)
+      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }

@@ -1205,7 +1205,7 @@ SimilarityGroupList &IRSimilarityIdentifier::findSimilarity(
  populateMapper(Modules, InstrList, IntegerMapping);
  findCandidates(InstrList, IntegerMapping);

-  return SimilarityCandidates.getValue();
+  return *SimilarityCandidates;
}

SimilarityGroupList &IRSimilarityIdentifier::findSimilarity(Module &M) {

@@ -1222,7 +1222,7 @@ SimilarityGroupList &IRSimilarityIdentifier::findSimilarity(Module &M) {
  populateMapper(M, InstrList, IntegerMapping);
  findCandidates(InstrList, IntegerMapping);

-  return SimilarityCandidates.getValue();
+  return *SimilarityCandidates;
}

INITIALIZE_PASS(IRSimilarityIdentifierWrapperPass, "ir-similarity-identifier",
@@ -578,8 +578,7 @@ const char *InlineAdvisor::getAnnotatedInlinePassName() {

  // IC is constant and initialized in constructor, so compute the annotated
  // name only once.
-  static const std::string PassName =
-      llvm::AnnotateInlinePassName(IC.getValue());
+  static const std::string PassName = llvm::AnnotateInlinePassName(*IC);

  return PassName.c_str();
}

@@ -836,7 +836,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
    auto *CallerBB = CandidateCall.getParent();
    BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
    CycleSavings += getCallsiteCost(this->CandidateCall, DL);
-    CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();
+    CycleSavings *= *CallerBFI->getBlockProfileCount(CallerBB);

    // Remove the cost of the cold basic blocks.
    int Size = Cost - ColdSize;

@@ -906,7 +906,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {

    if (auto Result = costBenefitAnalysis()) {
      DecidedByCostBenefit = true;
-      if (Result.getValue())
+      if (*Result)
        return InlineResult::success();
      else
        return InlineResult::failure("Cost over threshold.");

@@ -998,7 +998,7 @@ public:
        BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
        CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
        Writer(this) {
-    AllowRecursiveCall = Params.AllowRecursiveCall.getValue();
+    AllowRecursiveCall = *Params.AllowRecursiveCall;
  }

  /// Annotation Writer for instruction details

@@ -1262,7 +1262,7 @@ void InlineCostAnnotationWriter::emitInstructionAnnot(
  auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
  if (C) {
    OS << ", simplified to ";
-    C.getValue()->print(OS, true);
+    (*C)->print(OS, true);
  }
  OS << "\n";
}

@@ -1855,7 +1855,7 @@ void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
      // current threshold, but AutoFDO + ThinLTO currently relies on this
      // behavior to prevent inlining of hot callsites during ThinLTO
      // compile phase.
-      Threshold = HotCallSiteThreshold.getValue();
+      Threshold = *HotCallSiteThreshold;
    } else if (isColdCallSite(Call, CallerBFI)) {
      LLVM_DEBUG(dbgs() << "Cold callsite.\n");
      // Do not apply bonuses for a cold callsite including the

@@ -1353,7 +1353,7 @@ static Optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
        ValueLatticeElement OpLatticeVal =
            getValueFromCondition(Op, Condition, isTrueDest);
        if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
-          Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL);
+          Result = constantFoldUser(Usr, Op, *OpConst, DL);
          break;
        }
      }

@@ -367,7 +367,7 @@ static void computeFunctionSummary(
          // We should have named any anonymous globals
          assert(CalledFunction->hasName());
          auto ScaledCount = PSI->getProfileCount(*CB, BFI);
-          auto Hotness = ScaledCount ? getHotness(ScaledCount.getValue(), PSI)
+          auto Hotness = ScaledCount ? getHotness(*ScaledCount, PSI)
                                     : CalleeInfo::HotnessType::Unknown;
          if (ForceSummaryEdgesCold != FunctionSummary::FSHT_None)
            Hotness = CalleeInfo::HotnessType::Cold;
@@ -124,7 +124,7 @@ bool ProfileSummaryInfo::isFunctionHotInCallGraph(
    for (const auto &I : BB)
      if (isa<CallInst>(I) || isa<InvokeInst>(I))
        if (auto CallCount = getProfileCount(cast<CallBase>(I), nullptr))
-          TotalCallCount += CallCount.getValue();
+          TotalCallCount += *CallCount;
  if (isHotCount(TotalCallCount))
    return true;
}

@@ -153,7 +153,7 @@ bool ProfileSummaryInfo::isFunctionColdInCallGraph(
    for (const auto &I : BB)
      if (isa<CallInst>(I) || isa<InvokeInst>(I))
        if (auto CallCount = getProfileCount(cast<CallBase>(I), nullptr))
-          TotalCallCount += CallCount.getValue();
+          TotalCallCount += *CallCount;
  if (!isColdCount(TotalCallCount))
    return false;
}

@@ -187,7 +187,7 @@ bool ProfileSummaryInfo::isFunctionHotOrColdInCallGraphNthPercentile(
    for (const auto &I : BB)
      if (isa<CallInst>(I) || isa<InvokeInst>(I))
        if (auto CallCount = getProfileCount(cast<CallBase>(I), nullptr))
-          TotalCallCount += CallCount.getValue();
+          TotalCallCount += *CallCount;
  if (isHot && isHotCountNthPercentile(PercentileCutoff, TotalCallCount))
    return true;
  if (!isHot && !isColdCountNthPercentile(PercentileCutoff, TotalCallCount))

@@ -9951,7 +9951,7 @@ ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
-      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
+      const auto *R = cast<SCEVConstant>(getConstant(*S));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();

@@ -12781,7 +12781,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
-      return SE.getConstant(S.getValue());
+      return SE.getConstant(*S);
  }

  return SE.getCouldNotCompute();

@@ -70,7 +70,7 @@ static const AllocaInst *findMatchingAlloca(const IntrinsicInst &II,
  auto AllocaSizeInBits = AI->getAllocationSizeInBits(DL);
  if (!AllocaSizeInBits)
    return nullptr;
-  int64_t AllocaSize = AllocaSizeInBits.getValue() / 8;
+  int64_t AllocaSize = *AllocaSizeInBits / 8;

  auto *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
  if (!Size)

@@ -54,7 +54,7 @@ void SyntheticCountsUtils<CallGraphType>::propagateFromSCC(
    if (!OptProfCount)
      continue;
    auto Callee = CGT::edge_dest(E.second);
-    AdditionalCounts[Callee] += OptProfCount.getValue();
+    AdditionalCounts[Callee] += *OptProfCount;
  }

  // Update the counts for the nodes in the SCC.

@@ -67,7 +67,7 @@ void SyntheticCountsUtils<CallGraphType>::propagateFromSCC(
    if (!OptProfCount)
      continue;
    auto Callee = CGT::edge_dest(E.second);
-    AddCount(Callee, OptProfCount.getValue());
+    AddCount(Callee, *OptProfCount);
  }
}

@@ -1755,8 +1755,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
          break;
        }

-        unsigned FirstZeroHighBit =
-            32 - countLeadingZeros(VScaleMax.getValue());
+        unsigned FirstZeroHighBit = 32 - countLeadingZeros(*VScaleMax);
        if (FirstZeroHighBit < BitWidth)
          Known.Zero.setBitsFrom(FirstZeroHighBit);

@@ -270,7 +270,7 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) {
  // remangle intrinsics names as well.
  for (Function &F : llvm::make_early_inc_range(*M)) {
    if (auto Remangled = Intrinsic::remangleIntrinsicFunction(&F)) {
-      F.replaceAllUsesWith(Remangled.getValue());
+      F.replaceAllUsesWith(*Remangled);
      F.eraseFromParent();
    }
  }

@@ -3331,7 +3331,7 @@ Error BitcodeReader::globalCleanup() {
      // Some types could be renamed during loading if several modules are
      // loaded in the same LLVMContext (LTO scenario). In this case we should
      // remangle intrinsics names as well.
-      RemangledIntrinsics[&F] = Remangled.getValue();
+      RemangledIntrinsics[&F] = *Remangled;
    // Look for functions that rely on old function attribute behavior.
    UpgradeFunctionAttributes(F);
  }

@@ -203,7 +203,7 @@ void DbgValueHistoryMap::trimLocationRanges(
      if (auto R = intersects(StartMI, EndMI, ScopeRanges, Ordering)) {
        // Adjust ScopeRanges to exclude ranges which subsequent location ranges
        // cannot possibly intersect.
-        ScopeRanges = ArrayRef<InsnRange>(R.getValue(), ScopeRanges.end());
+        ScopeRanges = ArrayRef<InsnRange>(*R, ScopeRanges.end());
      } else {
        // If the location range does not intersect any scope range then the
        // DBG_VALUE which opened this location range is usless, mark it for

@@ -799,7 +799,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIDerivedType *DTy) {
  // or reference types.
  if (DTy->getDWARFAddressSpace())
    addUInt(Buffer, dwarf::DW_AT_address_class, dwarf::DW_FORM_data4,
-            DTy->getDWARFAddressSpace().getValue());
+            *DTy->getDWARFAddressSpace());
}

void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DITypeRefArray Args) {

@@ -245,7 +245,7 @@ assignSections(MachineFunction &MF,
  if (EHPadsSectionID == MBBSectionID::ExceptionSectionID)
    for (auto &MBB : MF)
      if (MBB.isEHPad())
-        MBB.setSectionID(EHPadsSectionID.getValue());
+        MBB.setSectionID(*EHPadsSectionID);
}

void llvm::sortBasicBlocksAndUpdateBranches(

@@ -3973,7 +3973,7 @@ bool CombinerHelper::matchExtractAllEltsFromBuildVector(
    auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
    if (!Cst)
      return false;
-    unsigned Idx = Cst.getValue().getZExtValue();
+    unsigned Idx = Cst->getZExtValue();
    if (Idx >= NumElts)
      return false; // Out of range.
    ExtractedElts.set(Idx);

@@ -1817,7 +1817,7 @@ static unsigned getConstrainedOpcode(Intrinsic::ID ID) {

bool IRTranslator::translateConstrainedFPIntrinsic(
  const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
-  fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
+  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)

@@ -2264,7 +2264,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
            .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                        {getOrCreateVReg(CI)},
                        {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
-            .addImm((int)RoundMode.getValue());
+            .addImm((int)*RoundMode);

    return true;
  }

@@ -1618,7 +1618,7 @@ bool MIParser::assignRegisterTies(MachineInstr &MI,
      continue;
    // The parser ensures that this operand is a register use, so we just have
    // to check the tied-def operand.
-    unsigned DefIdx = Operands[I].TiedDefIdx.getValue();
+    unsigned DefIdx = *Operands[I].TiedDefIdx;
    if (DefIdx >= E)
      return error(Operands[I].Begin,
                   Twine("use of invalid tied-def operand index '" +

@@ -643,7 +643,7 @@ bool MIRParserImpl::parseRegisterInfo(PerFunctionMIParsingState &PFS,
  // be saved for the caller).
  if (YamlMF.CalleeSavedRegisters) {
    SmallVector<MCPhysReg, 16> CalleeSavedRegisters;
-    for (const auto &RegSource : YamlMF.CalleeSavedRegisters.getValue()) {
+    for (const auto &RegSource : *YamlMF.CalleeSavedRegisters) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, RegSource.Value, Error))
        return error(Error, RegSource.SourceRange);

@@ -814,7 +814,7 @@ bool MIRParserImpl::initializeFrameInfo(PerFunctionMIParsingState &PFS,
                             Object.CalleeSavedRestored, ObjectIdx))
      return true;
    if (Object.LocalOffset)
-      MFI.mapLocalFrameObject(ObjectIdx, Object.LocalOffset.getValue());
+      MFI.mapLocalFrameObject(ObjectIdx, *Object.LocalOffset);
    if (parseStackObjectsDebugInfo(PFS, Object, ObjectIdx))
      return true;
  }

@@ -1431,7 +1431,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
    Register R = MRI.createVirtualRegister(RC);
    MachineInstr *IllegalPhi =
        BuildMI(*BB, MI, DebugLoc(), TII->get(TargetOpcode::PHI), R)
-            .addReg(IllegalPhiDefault.getValue())
+            .addReg(*IllegalPhiDefault)
            .addMBB(PreheaderBB) // Block choice is arbitrary and has no effect.
            .addReg(LoopReg)
            .addMBB(BB); // Block choice is arbitrary and has no effect.
@@ -924,7 +924,7 @@ Optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) {
  InstructionCost ICost =
      TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency);
  if (auto OC = ICost.getValue())
-    return Optional<uint64_t>(OC.getValue());
+    return Optional<uint64_t>(*OC);
  return Optional<uint64_t>(None);
}

@@ -17928,7 +17928,7 @@ bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
  if (!UseTrunc) {
    NewStore = DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
                            FirstInChain->getPointerInfo(),
-                            FirstInChain->getAlign(), Flags.getValue(), AAInfo);
+                            FirstInChain->getAlign(), *Flags, AAInfo);
  } else { // Must be realized as a trunc store
    EVT LegalizedStoredValTy =
        TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());

@@ -17940,7 +17940,7 @@ bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
    NewStore = DAG.getTruncStore(
        NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
        FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
-        FirstInChain->getAlign(), Flags.getValue(), AAInfo);
+        FirstInChain->getAlign(), *Flags, AAInfo);
  }

  // Replace all merged stores with the new store.

@@ -5543,7 +5543,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
    if (!FoldAttempt)
      return SDValue();

-    SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
+    SDValue Folded = getConstant(*FoldAttempt, DL, VT);
    assert((!Folded || !VT.isVector()) &&
           "Can't fold vectors ops with scalar operands");
    return Folded;

@@ -5588,7 +5588,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
      Optional<APInt> Fold = FoldValue(Opcode, RawBits1[I], RawBits2[I]);
      if (!Fold)
        break;
-      RawBits.push_back(Fold.getValue());
+      RawBits.push_back(*Fold);
    }
    if (RawBits.size() == NumElts.getFixedValue()) {
      // We have constant folded, but we need to cast this again back to

@@ -337,7 +337,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,

  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
-        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
+        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =

@@ -726,7 +726,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
-      *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt.getValue());
+      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.

@@ -6372,7 +6372,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
    SDValue Result;
    Result = DAG.getNode(
        ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
-        DAG.getTargetConstant((int)RoundMode.getValue(), sdl,
+        DAG.getTargetConstant((int)*RoundMode, sdl,
                              TLI.getPointerTy(DAG.getDataLayout())));
    setValue(&I, Result);

@@ -7303,7 +7303,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
  };

  SDVTList VTs = DAG.getVTList(ValueVTs);
-  fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
+  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  SDNodeFlags Flags;
  if (EB == fp::ExceptionBehavior::ebIgnore)

@@ -7386,7 +7386,7 @@ static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
                           : ISD::VP_REDUCE_FMUL;
  }

-  return ResOPC.getValue();
+  return *ResOPC;
}

void SelectionDAGBuilder::visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
@@ -669,7 +669,7 @@ Error write(MCStreamer &Out, ArrayRef<std::string> Inputs) {
        FoundCUUnit = true;
      } else if (Header.UnitType == dwarf::DW_UT_split_type) {
        auto P = TypeIndexEntries.insert(
-            std::make_pair(Header.Signature.getValue(), Entry));
+            std::make_pair(*Header.Signature, Entry));
        if (!P.second)
          continue;
      }

@@ -389,7 +389,7 @@ void DWARFContext::dump(
    OS << '\n' << Name << " contents:\n";
    if (auto DumpOffset = DumpOffsets[DIDT_ID_DebugInfo])
      for (const auto &U : Units)
-        U->getDIEForOffset(DumpOffset.getValue())
+        U->getDIEForOffset(*DumpOffset)
            .dump(OS, 0, DumpOpts.noImplicitRecursion());
    else
      for (const auto &U : Units)

@@ -115,7 +115,7 @@ Error DWARFDebugMacro::parseImpl(
    if (IsMacro && Data.isValidOffset(Offset)) {
      // Keep a mapping from Macro contribution to CUs, this will
      // be needed while retrieving macro from DW_MACRO_define_strx form.
-      for (const auto &U : Units.getValue())
+      for (const auto &U : *Units)
        if (auto CUDIE = U->getUnitDIE())
          // Skip units which does not contibutes to macro section.
          if (auto MacroOffset = toSectionOffset(CUDIE.find(DW_AT_macros)))

@@ -139,8 +139,7 @@ static void dumpAttribute(raw_ostream &OS, const DWARFDie &Die,
      Color = HighlightColor::String;
      if (const auto *LT = U->getContext().getLineTableForUnit(U))
        if (LT->getFileNameByIndex(
-                FormValue.getAsUnsignedConstant().getValue(),
-                U->getCompilationDir(),
+                *FormValue.getAsUnsignedConstant(), U->getCompilationDir(),
                DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath, File)) {
          File = '"' + File + '"';
          Name = File;

@@ -126,7 +126,7 @@ void IRSpeculationLayer::emit(std::unique_ptr<MaterializationResponsibility> R,

      assert(Mutator.GetInsertBlock()->getParent() == &Fn &&
             "IR builder association mismatch?");
-      S.registerSymbols(internToJITSymbols(IRNames.getValue()),
+      S.registerSymbols(internToJITSymbols(*IRNames),
                        &R->getTargetJITDylib());
    }
  }

@@ -1032,7 +1032,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
    // Remangle our intrinsic since we upgrade the mangling
    auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
    if (Result != None) {
-      NewFn = Result.getValue();
+      NewFn = *Result;
      return true;
    }

@@ -4497,7 +4497,7 @@ SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return None;
-  return Weights.getValue()[idx];
+  return (*Weights)[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(

@@ -4509,7 +4509,7 @@ void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
-    auto &OldW = Weights.getValue()[idx];
+    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;

@@ -314,7 +314,7 @@ ElementCount VPIntrinsic::getStaticVectorLength() const {

Value *VPIntrinsic::getMaskParam() const {
  if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
-    return getArgOperand(MaskPos.getValue());
+    return getArgOperand(*MaskPos);
  return nullptr;
}

@@ -325,7 +325,7 @@ void VPIntrinsic::setMaskParam(Value *NewMask) {

Value *VPIntrinsic::getVectorLengthParam() const {
  if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
-    return getArgOperand(EVLPos.getValue());
+    return getArgOperand(*EVLPos);
  return nullptr;
}

@@ -194,7 +194,7 @@ public:
    for (const std::string &Lib : Stub.NeededLibs)
      DynStr.Content.add(Lib);
    if (Stub.SoName)
-      DynStr.Content.add(Stub.SoName.getValue());
+      DynStr.Content.add(*Stub.SoName);

    std::vector<OutputSection<ELFT> *> Sections = {&DynSym, &DynStr, &DynTab,
                                                   &ShStrTab};

@@ -231,7 +231,7 @@ public:
      DynTab.Content.addValue(DT_NEEDED, DynStr.Content.getOffset(Lib));
    if (Stub.SoName)
      DynTab.Content.addValue(DT_SONAME,
-                              DynStr.Content.getOffset(Stub.SoName.getValue()));
+                              DynStr.Content.getOffset(*Stub.SoName));
    DynTab.Size = DynTab.Content.getSize();
    // Calculate sections' addresses and offsets.
    uint64_t CurrentOffset = sizeof(Elf_Ehdr);

@@ -250,8 +250,7 @@ public:
    fillStrTabShdr(ShStrTab);

    // Finish initializing the ELF header.
-    initELFHeader<ELFT>(ElfHeader,
-                        static_cast<uint16_t>(Stub.Target.Arch.getValue()));
+    initELFHeader<ELFT>(ElfHeader, static_cast<uint16_t>(*Stub.Target.Arch));
    ElfHeader.e_shstrndx = ShStrTab.Index;
    ElfHeader.e_shnum = LastSection->Index + 1;
    ElfHeader.e_shoff =

@@ -193,7 +193,7 @@ Expected<std::unique_ptr<IFSStub>> ifs::readIFSFromBuffer(StringRef Buf) {
                     std::make_error_code(std::errc::invalid_argument));
  if (Stub->Target.ArchString) {
    Stub->Target.Arch =
-        ELF::convertArchNameToEMachine(Stub->Target.ArchString.getValue());
+        ELF::convertArchNameToEMachine(*Stub->Target.ArchString);
  }
  return std::move(Stub);
}

@@ -266,7 +266,7 @@ Error ifs::validateIFSTarget(IFSStub &Stub, bool ParseTriple) {
                       ValidationEC);
  }
  if (ParseTriple) {
-    IFSTarget TargetFromTriple = parseTriple(Stub.Target.Triple.getValue());
+    IFSTarget TargetFromTriple = parseTriple(*Stub.Target.Triple);
    Stub.Target.Arch = TargetFromTriple.Arch;
    Stub.Target.BitWidth = TargetFromTriple.BitWidth;
    Stub.Target.Endianness = TargetFromTriple.Endianness;

@@ -643,7 +643,7 @@ Error LTO::addModule(InputFile &Input, unsigned ModI,
      // If only some modules were split, flag this in the index so that
      // we can skip or error on optimizations that need consistently split
      // modules (whole program devirt and lower type tests).
-      if (EnableSplitLTOUnit.getValue() != LTOInfo->EnableSplitLTOUnit)
+      if (*EnableSplitLTOUnit != LTOInfo->EnableSplitLTOUnit)
        ThinLTO.CombinedIndex.setPartiallySplitLTOUnits();
    } else
      EnableSplitLTOUnit = LTOInfo->EnableSplitLTOUnit;

@@ -1046,7 +1046,7 @@ Expected<Constant *> IRLinker::linkGlobalValueProto(GlobalValue *SGV,
    if (Function *F = dyn_cast<Function>(NewGV))
      if (auto Remangled = Intrinsic::remangleIntrinsicFunction(F)) {
        NewGV->eraseFromParent();
-        NewGV = Remangled.getValue();
+        NewGV = *Remangled;
        NeedsRenaming = false;
      }

@@ -188,7 +188,7 @@ public:

    // TODO: Parse UniqueID
    MCSectionWasm *WS = getContext().getWasmSection(
-        Name, Kind.getValue(), Flags, GroupName, MCContext::GenericSectionID);
+        Name, *Kind, Flags, GroupName, MCContext::GenericSectionID);

    if (WS->getSegmentFlags() != Flags)
      Parser->Error(loc, "changed section flags for " + Name +

@@ -448,7 +448,7 @@ void XCOFFObjectWriter::executePostLayoutBinding(MCAssembler &Asm,
      SectionMap[MCSec] = DwarfSec.get();

      DwarfSectionEntry SecEntry(MCSec->getName(),
-                                 MCSec->getDwarfSubtypeFlags().getValue(),
+                                 *MCSec->getDwarfSubtypeFlags(),
                                 std::move(DwarfSec));
      DwarfSections.push_back(std::move(SecEntry));
    } else

@@ -264,8 +264,7 @@ static Error processLoadCommands(const MachOConfig &MachOConfig, Object &Obj) {
    if (LC.MachOLoadCommand.load_command_data.cmd == MachO::LC_SEGMENT_64 ||
        LC.MachOLoadCommand.load_command_data.cmd == MachO::LC_SEGMENT) {
      return LC.Sections.empty() &&
-             MachOConfig.EmptySegmentsToRemove.contains(
-                 LC.getSegmentName().getValue());
+             MachOConfig.EmptySegmentsToRemove.contains(*LC.getSegmentName());
    }
    return false;
  };

@@ -307,7 +307,7 @@ SubtargetFeatures ELFObjectFileBase::getRISCVFeatures() const {
    // The Arch pattern is [rv32|rv64][i|e]version(_[m|a|f|d|c]version)*
    // Version string pattern is (major)p(minor). Major and minor are optional.
    // For example, a version number could be 2p0, 2, or p92.
-    StringRef Arch = Attr.getValue();
+    StringRef Arch = *Attr;
    if (Arch.consume_front("rv32"))
      Features.AddFeature("64bit", false);
    else if (Arch.consume_front("rv64"))

@@ -1520,7 +1520,7 @@ Error WasmObjectFile::parseElemSection(ReadContext &Ctx) {
Error WasmObjectFile::parseDataSection(ReadContext &Ctx) {
  DataSection = Sections.size();
  uint32_t Count = readVaruint32(Ctx);
-  if (DataCount && Count != DataCount.getValue())
+  if (DataCount && Count != *DataCount)
    return make_error<GenericBinaryError>(
        "number of data segments does not match DataCount section");
  DataSegments.reserve(Count);

@@ -1342,7 +1342,7 @@ XCOFFTracebackTable::XCOFFTracebackTable(const uint8_t *Ptr, uint64_t &Size,
    NumOfCtlAnchors = DE.getU32(Cur);
    if (Cur && NumOfCtlAnchors) {
      SmallVector<uint32_t, 8> Disp;
-      Disp.reserve(NumOfCtlAnchors.getValue());
+      Disp.reserve(*NumOfCtlAnchors);
      for (uint32_t I = 0; I < NumOfCtlAnchors && Cur; ++I)
        Disp.push_back(DE.getU32(Cur));
      if (Cur)

@@ -1369,7 +1369,7 @@ XCOFFTracebackTable::XCOFFTracebackTable(const uint8_t *Ptr, uint64_t &Size,
        return;
      }
      VecExt = TBVecExtOrErr.get();
-      VectorParmsNum = VecExt.getValue().getNumberOfVectorParms();
+      VectorParmsNum = VecExt->getNumberOfVectorParms();
    }
  }

@@ -596,7 +596,7 @@ unsigned ELFState<ELFT>::toSectionIndex(StringRef S, StringRef LocSec,
  const ELFYAML::SectionHeaderTable &SectionHeaders =
      Doc.getSectionHeaderTable();
  if (SectionHeaders.IsImplicit ||
-      (SectionHeaders.NoHeaders && !SectionHeaders.NoHeaders.getValue()) ||
+      (SectionHeaders.NoHeaders && !*SectionHeaders.NoHeaders) ||
      SectionHeaders.isDefault())
    return Index;

@@ -340,7 +340,7 @@ Optional<bool> KnownBits::eq(const KnownBits &LHS, const KnownBits &RHS) {

Optional<bool> KnownBits::ne(const KnownBits &LHS, const KnownBits &RHS) {
  if (Optional<bool> KnownEQ = eq(LHS, RHS))
-    return Optional<bool>(!KnownEQ.getValue());
+    return Optional<bool>(!*KnownEQ);
  return None;
}

@@ -356,7 +356,7 @@ Optional<bool> KnownBits::ugt(const KnownBits &LHS, const KnownBits &RHS) {

Optional<bool> KnownBits::uge(const KnownBits &LHS, const KnownBits &RHS) {
  if (Optional<bool> IsUGT = ugt(RHS, LHS))
-    return Optional<bool>(!IsUGT.getValue());
+    return Optional<bool>(!*IsUGT);
  return None;
}

@@ -380,7 +380,7 @@ Optional<bool> KnownBits::sgt(const KnownBits &LHS, const KnownBits &RHS) {

Optional<bool> KnownBits::sge(const KnownBits &LHS, const KnownBits &RHS) {
  if (Optional<bool> KnownSGT = sgt(RHS, LHS))
-    return Optional<bool>(!KnownSGT.getValue());
+    return Optional<bool>(!*KnownSGT);
  return None;
}

@@ -415,7 +415,7 @@ bool AArch64MIPeepholeOpt::splitTwoPartImm(
    Imm &= 0xFFFFFFFF;
  OpcodePair Opcode;
  if (auto R = SplitAndOpc(Imm, RegSize, Imm0, Imm1))
-    Opcode = R.getValue();
+    Opcode = *R;
  else
    return false;

@@ -417,7 +417,7 @@ bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
  bool IsInteresting =
      AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
      // alloca() may be called with 0 size, ignore it.
-      AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
+      *AI.getAllocationSizeInBits(*DL) > 0 &&
      // inalloca allocas are not treated as static, and we don't want
      // dynamic alloca instrumentation for them as well.
      !AI.isUsedWithInAlloca() &&

@@ -573,7 +573,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
        End->eraseFromParent();
      }
    } else {
-      uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
+      uint64_t Size = *Info.AI->getAllocationSizeInBits(*DL) / 8;
      Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
      tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
      for (auto &RI : SInfo.RetVec) {

@@ -392,7 +392,7 @@ AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  if (VScaleRangeAttr.isValid()) {
    Optional<unsigned> VScaleMax = VScaleRangeAttr.getVScaleRangeMax();
    MinSVEVectorSize = VScaleRangeAttr.getVScaleRangeMin() * 128;
-    MaxSVEVectorSize = VScaleMax ? VScaleMax.getValue() * 128 : 0;
+    MaxSVEVectorSize = VScaleMax ? *VScaleMax * 128 : 0;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;

@@ -6743,7 +6743,7 @@ void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
  Optional<int64_t> CstVal =
      getIConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
-  MIB.addImm(CstVal.getValue());
+  MIB.addImm(*CstVal);
}

void AArch64InstructionSelector::renderLogicalImm32(

@@ -442,7 +442,7 @@ class CollectReachableCallees {
      continue;

    for (const auto &GI : *CGN) {
-      auto *RCB = cast<CallBase>(GI.first.getValue());
+      auto *RCB = cast<CallBase>(*GI.first);
      auto *RCGN = GI.second;

      if (auto *DCallee = RCGN->getFunction()) {

@@ -1514,7 +1514,7 @@ bool GCNTargetMachine::parseMachineFunctionInfo(
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
-      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());
+      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;

@@ -714,7 +714,7 @@ Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
        return None;
      }

-      SSID = IsSyncScopeInclusion.getValue() ? SSID : MMO->getSyncScopeID();
+      SSID = *IsSyncScopeInclusion ? SSID : MMO->getSyncScopeID();
      Ordering = getMergedAtomicOrdering(Ordering, OpOrdering);
      assert(MMO->getFailureOrdering() != AtomicOrdering::Release &&
             MMO->getFailureOrdering() != AtomicOrdering::AcquireRelease);

@@ -733,7 +733,7 @@ Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
    return None;
  }
  std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
-      ScopeOrNone.getValue();
+      *ScopeOrNone;
  if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
      ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) != OrderingAddrSpace) ||
      ((InstrAddrSpace & SIAtomicAddrSpace::ATOMIC) == SIAtomicAddrSpace::NONE)) {

@@ -795,7 +795,7 @@ Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
-      ScopeOrNone.getValue();
+      *ScopeOrNone;

  if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
      ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) != OrderingAddrSpace)) {

@@ -11385,7 +11385,7 @@ bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
-    Tag = Ret.getValue();
+    Tag = *Ret;
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

@@ -390,7 +390,7 @@ MVEGatherScatterLowering::getVarAndConst(Value *Inst, int TypeScale) {
    return ReturnFalse;

  // Check that the constant is small enough for an incrementing gather
-  int64_t Immediate = Const.getValue() << TypeScale;
+  int64_t Immediate = *Const << TypeScale;
  if (Immediate > 512 || Immediate < -512 || Immediate % 4 != 0)
    return ReturnFalse;

@@ -923,8 +923,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
    SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(CodeAddrSpace, dl),
                      getI32Imm(vecType, dl), getI32Imm(fromType, dl),
                      getI32Imm(fromTypeWidth, dl), Addr, Chain };
-    NVPTXLD = CurDAG->getMachineNode(Opcode.getValue(), dl, TargetVT,
-                                     MVT::Other, Ops);
+    NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
  } else if (PointerSize == 64 ? SelectADDRsi64(N1.getNode(), N1, Base, Offset)
                               : SelectADDRsi(N1.getNode(), N1, Base, Offset)) {
    Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_asi, NVPTX::LD_i16_asi,

@@ -936,8 +935,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
    SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(CodeAddrSpace, dl),
                      getI32Imm(vecType, dl), getI32Imm(fromType, dl),
                      getI32Imm(fromTypeWidth, dl), Base, Offset, Chain };
-    NVPTXLD = CurDAG->getMachineNode(Opcode.getValue(), dl, TargetVT,
-                                     MVT::Other, Ops);
+    NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
  } else if (PointerSize == 64 ? SelectADDRri64(N1.getNode(), N1, Base, Offset)
                               : SelectADDRri(N1.getNode(), N1, Base, Offset)) {
    if (PointerSize == 64)

@@ -955,8 +953,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
    SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(CodeAddrSpace, dl),
                      getI32Imm(vecType, dl), getI32Imm(fromType, dl),
                      getI32Imm(fromTypeWidth, dl), Base, Offset, Chain };
-    NVPTXLD = CurDAG->getMachineNode(Opcode.getValue(), dl, TargetVT,
-                                     MVT::Other, Ops);
+    NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
  } else {
    if (PointerSize == 64)
      Opcode = pickOpcodeForVT(

@@ -974,8 +971,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
    SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(CodeAddrSpace, dl),
                      getI32Imm(vecType, dl), getI32Imm(fromType, dl),
                      getI32Imm(fromTypeWidth, dl), N1, Chain };
-    NVPTXLD = CurDAG->getMachineNode(Opcode.getValue(), dl, TargetVT,
-                                     MVT::Other, Ops);
+    NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
  }

  if (!NVPTXLD)

@@ -1092,7 +1088,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
    SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
                      getI32Imm(VecType, DL), getI32Imm(FromType, DL),
                      getI32Imm(FromTypeWidth, DL), Addr, Chain };
-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, N->getVTList(), Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
  } else if (PointerSize == 64
                 ? SelectADDRsi64(Op1.getNode(), Op1, Base, Offset)
                 : SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {

@@ -1119,7 +1115,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
    SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
                      getI32Imm(VecType, DL), getI32Imm(FromType, DL),
                      getI32Imm(FromTypeWidth, DL), Base, Offset, Chain };
-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, N->getVTList(), Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
  } else if (PointerSize == 64
                 ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
                 : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {

@@ -1169,7 +1165,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
                      getI32Imm(VecType, DL), getI32Imm(FromType, DL),
                      getI32Imm(FromTypeWidth, DL), Base, Offset, Chain };

-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, N->getVTList(), Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
  } else {
    if (PointerSize == 64) {
      switch (N->getOpcode()) {

@@ -1217,7 +1213,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
    SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
                      getI32Imm(VecType, DL), getI32Imm(FromType, DL),
                      getI32Imm(FromTypeWidth, DL), Op1, Chain };
-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, N->getVTList(), Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
  }

  MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();

@@ -1361,7 +1357,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
    if (!Opcode)
      return false;
    SDValue Ops[] = { Addr, Chain };
-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, InstVTList, Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, InstVTList, Ops);
  } else if (TM.is64Bit() ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
                          : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
    if (TM.is64Bit()) {

@@ -1508,7 +1504,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
    if (!Opcode)
      return false;
    SDValue Ops[] = {Base, Offset, Chain};
-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, InstVTList, Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, InstVTList, Ops);
  } else {
    if (TM.is64Bit()) {
      switch (N->getOpcode()) {

@@ -1654,7 +1650,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
    if (!Opcode)
      return false;
    SDValue Ops[] = { Op1, Chain };
-    LD = CurDAG->getMachineNode(Opcode.getValue(), DL, InstVTList, Ops);
+    LD = CurDAG->getMachineNode(*Opcode, DL, InstVTList, Ops);
  }

  MachineMemOperand *MemRef = Mem->getMemOperand();

@@ -1787,7 +1783,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
                       getI32Imm(toTypeWidth, dl),
                       Addr,
                       Chain};
-    NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
+    NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
  } else if (PointerSize == 64
                 ? SelectADDRsi64(BasePtr.getNode(), BasePtr, Base, Offset)
                 : SelectADDRsi(BasePtr.getNode(), BasePtr, Base, Offset)) {

@@ -1806,7 +1802,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
                       Base,
                       Offset,
                       Chain};
-    NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
+    NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
  } else if (PointerSize == 64
                 ? SelectADDRri64(BasePtr.getNode(), BasePtr, Base, Offset)
                 : SelectADDRri(BasePtr.getNode(), BasePtr, Base, Offset)) {

@@ -1832,7 +1828,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
                       Base,
                       Offset,
                       Chain};
-    NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
+    NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
  } else {
    if (PointerSize == 64)
      Opcode =

@@ -1855,7 +1851,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
                       getI32Imm(toTypeWidth, dl),
                       BasePtr,
                       Chain};
-    NVPTXST = CurDAG->getMachineNode(Opcode.getValue(), dl, MVT::Other, Ops);
+    NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
  }

  if (!NVPTXST)

@@ -2082,7 +2078,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {

  StOps.push_back(Chain);

-  ST = CurDAG->getMachineNode(Opcode.getValue(), DL, MVT::Other, StOps);
+  ST = CurDAG->getMachineNode(*Opcode, DL, MVT::Other, StOps);

  MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(ST), {MemRef});

@@ -2164,7 +2160,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
  Ops.push_back(Chain);
  Ops.push_back(Flag);

-  ReplaceNode(Node, CurDAG->getMachineNode(Opcode.getValue(), DL, VTs, Ops));
+  ReplaceNode(Node, CurDAG->getMachineNode(*Opcode, DL, VTs, Ops));
  return true;
}

@@ -2230,7 +2226,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
  if (!Opcode)
    return false;

-  SDNode *Ret = CurDAG->getMachineNode(Opcode.getValue(), DL, MVT::Other, Ops);
+  SDNode *Ret = CurDAG->getMachineNode(*Opcode, DL, MVT::Other, Ops);
  MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ret), {MemRef});

@@ -2333,8 +2329,7 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
  }

  SDVTList RetVTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
-  SDNode *Ret =
-      CurDAG->getMachineNode(Opcode.getValue(), DL, RetVTs, Ops);
+  SDNode *Ret = CurDAG->getMachineNode(*Opcode, DL, RetVTs, Ops);
  MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ret), {MemRef});

@@ -775,7 +775,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
    if (!OptPPCPred)
      return false;

-    PPC::Predicate PPCPred = OptPPCPred.getValue();
+    PPC::Predicate PPCPred = *OptPPCPred;

    // Take advantage of fall-through opportunities.
    if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

@@ -2179,7 +2179,7 @@ bool RISCVAsmParser::parseDirectiveAttribute() {
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
-    Tag = Ret.getValue();
+    Tag = *Ret;
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

@@ -375,7 +375,7 @@ public:
      auto Type = WebAssembly::parseType(Lexer.getTok().getString());
      if (!Type)
        return error("unknown type: ", Lexer.getTok());
-      Types.push_back(Type.getValue());
+      Types.push_back(*Type);
      Parser.Lex();
      if (!isNext(AsmToken::Comma))
        break;

@@ -817,8 +817,7 @@ public:
      // Now set this symbol with the correct type.
      auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
      WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
-      WasmSym->setGlobalType(
-          wasm::WasmGlobalType{uint8_t(Type.getValue()), Mutable});
+      WasmSym->setGlobalType(wasm::WasmGlobalType{uint8_t(*Type), Mutable});
      // And emit the directive again.
      TOut.emitGlobalType(WasmSym);
      return expect(AsmToken::EndOfStatement, "EOL");

@@ -848,7 +847,7 @@ public:
      // symbol
      auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
      WasmSym->setType(wasm::WASM_SYMBOL_TYPE_TABLE);
-      wasm::WasmTableType Type = {uint8_t(ElemType.getValue()), Limits};
+      wasm::WasmTableType Type = {uint8_t(*ElemType), Limits};
      WasmSym->setTableType(Type);
      TOut.emitTableType(WasmSym);
      return expect(AsmToken::EndOfStatement, "EOL");

@@ -276,7 +276,7 @@ void WebAssemblyAsmPrinter::emitSymbolType(const MCSymbolWasm *Sym) {
  if (!WasmTy)
    return;

-  switch (WasmTy.getValue()) {
+  switch (*WasmTy) {
  case wasm::WASM_SYMBOL_TYPE_GLOBAL:
    getTargetStreamer()->emitGlobalType(Sym);
    break;

@@ -160,7 +160,7 @@ bool X86DiscriminateMemOps::runOnMachineFunction(MachineFunction &MF) {
      }
      // Since we were able to encode, bump the MemOpDiscriminators.
      ++MemOpDiscriminators[L];
-      DI = DI->cloneWithDiscriminator(EncodedDiscriminator.getValue());
+      DI = DI->cloneWithDiscriminator(*EncodedDiscriminator);
      assert(DI && "DI should not be nullptr");
      updateDebugInfo(&MI, DI);
      Changed = true;

@@ -501,7 +501,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {

  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
-      OutMI.addOperand(MaybeMCOp.getValue());
+      OutMI.addOperand(*MaybeMCOp);

  // Handle a few special cases to eliminate operand modifiers.
  switch (OutMI.getOpcode()) {

@@ -1317,7 +1317,7 @@ void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
                                    E = FaultingMI.operands_end();
       I != E; ++I)
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I))
-      MI.addOperand(MaybeOperand.getValue());
+      MI.addOperand(*MaybeOperand);

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  OutStreamer->emitInstruction(MI, getSubtargetInfo());

@@ -1382,7 +1382,7 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
    MCI.setOpcode(Opcode);
    for (auto &MO : drop_begin(MI.operands(), 2))
      if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
-        MCI.addOperand(MaybeOperand.getValue());
+        MCI.addOperand(*MaybeOperand);

    SmallString<256> Code;
    SmallVector<MCFixup, 4> Fixups;

@@ -1758,7 +1758,7 @@ void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
  Ret.setOpcode(OpCode);
  for (auto &MO : drop_begin(MI.operands()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
-      Ret.addOperand(MaybeOperand.getValue());
+      Ret.addOperand(*MaybeOperand);
  OutStreamer->emitInstruction(Ret, getSubtargetInfo());
  emitX86Nops(*OutStreamer, 10, Subtarget);
  recordSled(CurSled, MI, SledKind::FUNCTION_EXIT, 2);

@@ -1797,7 +1797,7 @@ void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
  OutStreamer->AddComment("TAILCALL");
  for (auto &MO : drop_begin(MI.operands()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
-      TC.addOperand(MaybeOperand.getValue());
+      TC.addOperand(*MaybeOperand);
  OutStreamer->emitInstruction(TC, getSubtargetInfo());
}

@ -1391,7 +1391,7 @@ struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
|
|||
bool getShouldLiveOnFrame() const {
|
||||
if (!ShouldLiveOnFrame)
|
||||
ShouldLiveOnFrame = computeShouldLiveOnFrame();
|
||||
return ShouldLiveOnFrame.getValue();
|
||||
return *ShouldLiveOnFrame;
|
||||
}
|
||||
|
||||
bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
|
||||
|
@ -1793,7 +1793,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
|
|||
auto *FramePtr = GetFramePointer(Alloca);
|
||||
auto *FramePtrRaw =
|
||||
Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
|
||||
auto &Value = Alias.second.getValue();
|
||||
auto &Value = *Alias.second;
|
||||
auto ITy = IntegerType::get(C, Value.getBitWidth());
|
||||
auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtrRaw,
|
||||
ConstantInt::get(ITy, Value));
|
||||
|
|
|
@ -2892,7 +2892,7 @@ private:
|
|||
KnownUBInsts.insert(I);
|
||||
return llvm::None;
|
||||
}
|
||||
if (!SimplifiedV.getValue())
|
||||
if (!*SimplifiedV)
|
||||
return nullptr;
|
||||
V = *SimplifiedV;
|
||||
}
|
||||
|
@ -5540,7 +5540,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
|
|||
A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
|
||||
return true;
|
||||
}
|
||||
if (auto *C = COpt.getValue()) {
|
||||
if (auto *C = *COpt) {
|
||||
SimplifiedAssociatedValue = C;
|
||||
A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
|
||||
return true;
|
||||
|
@ -6736,7 +6736,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
|
|||
|
||||
// Avoid arguments with padding for now.
|
||||
if (!getIRPosition().hasAttr(Attribute::ByVal) &&
|
||||
!ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
|
||||
!ArgumentPromotionPass::isDenselyPacked(*PrivatizableType,
|
||||
A.getInfoCache().getDL())) {
|
||||
LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
|
||||
return indicatePessimisticFixpoint();
|
||||
|
@ -6745,7 +6745,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
|
|||
// Collect the types that will replace the privatizable type in the function
|
||||
// signature.
|
||||
SmallVector<Type *, 16> ReplacementTypes;
|
||||
identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
|
||||
identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
|
||||
|
||||
// Verify callee and caller agree on how the promoted argument would be
|
||||
// passed.
|
||||
|
@ -7061,7 +7061,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
|
|||
// When no alignment is specified for the load instruction,
|
||||
// natural alignment is assumed.
|
||||
createReplacementValues(
|
||||
AlignAA.getAssumedAlign(), PrivatizableType.getValue(), ACS,
|
||||
AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
|
||||
ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
|
||||
NewArgOperands);
|
||||
};
|
||||
|
@ -7069,7 +7069,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
|
|||
// Collect the types that will replace the privatizable type in the function
|
||||
// signature.
|
||||
SmallVector<Type *, 16> ReplacementTypes;
|
||||
identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
|
||||
identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
|
||||
|
||||
// Register a rewrite of the argument.
|
||||
if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
|
||||
|
|
|
@ -777,7 +777,7 @@ static void findConstants(IRSimilarityCandidate &C, DenseSet<unsigned> &NotSame,
|
|||
for (Value *V : (*IDIt).OperVals) {
|
||||
// Since these are stored before any outlining, they will be in the
|
||||
// global value numbering.
|
||||
unsigned GVN = C.getGVN(V).getValue();
|
||||
unsigned GVN = *C.getGVN(V);
|
||||
if (isa<Constant>(V))
|
||||
if (NotSame.contains(GVN) && !Seen.contains(GVN)) {
|
||||
Inputs.push_back(GVN);
|
||||
|
@ -1194,7 +1194,7 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
|
|||
continue;
|
||||
|
||||
// Collect the canonical numbers of the values in the PHINode.
|
||||
unsigned GVN = OGVN.getValue();
|
||||
unsigned GVN = *OGVN;
|
||||
OGVN = Cand.getCanonicalNum(GVN);
|
||||
assert(OGVN && "No GVN found for incoming value?");
|
||||
PHIGVNs.push_back(*OGVN);
|
||||
|
@@ -1223,7 +1223,7 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
       assert(PrevBlock && "Expected a predecessor not in the region!");
       OGVN = Cand.getGVN(PrevBlock);
     }
-    GVN = OGVN.getValue();
+    GVN = *OGVN;
     OGVN = Cand.getCanonicalNum(GVN);
     assert(OGVN && "No GVN found for incoming block?");
     PHIGVNs.push_back(*OGVN);
@@ -975,7 +975,7 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
     Attribute Attr =
         Trunc.getFunction()->getFnAttribute(Attribute::VScaleRange);
     if (Optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
-      if (Log2_32(MaxVScale.getValue()) < DestWidth) {
+      if (Log2_32(*MaxVScale) < DestWidth) {
         Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
         return replaceInstUsesWith(Trunc, VScale);
       }
@@ -1347,7 +1347,7 @@ Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
     Attribute Attr = CI.getFunction()->getFnAttribute(Attribute::VScaleRange);
     if (Optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
       unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
-      if (Log2_32(MaxVScale.getValue()) < TypeWidth) {
+      if (Log2_32(*MaxVScale) < TypeWidth) {
         Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
         return replaceInstUsesWith(CI, VScale);
       }
@@ -1620,7 +1620,7 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
       CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
     Attribute Attr = CI.getFunction()->getFnAttribute(Attribute::VScaleRange);
     if (Optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
-      if (Log2_32(MaxVScale.getValue()) < (SrcBitSize - 1)) {
+      if (Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
         Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
         return replaceInstUsesWith(CI, VScale);
       }
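The three InstCombine hunks above (visitTrunc, visitZExt, visitSExt) share one numeric argument: if vscale is known to be at most MaxVScale, it needs only Log2_32(MaxVScale) + 1 bits, so a cast involving at least that many bits cannot change its value. A self-contained check of that bound, where log2_32 is a hand-rolled stand-in for llvm::Log2_32 and the constants are hypothetical:

#include <cassert>
#include <cstdint>

// Floor of log2, matching the behavior of llvm::Log2_32 for nonzero inputs.
static unsigned log2_32(uint32_t V) {
  unsigned R = 0;
  while (V >>= 1)
    ++R;
  return R;
}

int main() {
  const uint32_t MaxVScale = 16; // assumed vscale_range upper bound
  const unsigned DestWidth = 8;  // assumed width of the narrower type
  // The condition the hunks test before folding the cast away:
  assert(log2_32(MaxVScale) < DestWidth);
  // And what it guarantees: every legal vscale value survives truncation.
  for (uint32_t VScale = 1; VScale <= MaxVScale; ++VScale)
    assert((VScale & ((1u << DestWidth) - 1)) == VScale);
  return 0;
}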
@@ -313,8 +313,7 @@ public:
     auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader());
     // If the average loop trip count is not greater than 1.5, we skip
     // promotion.
-    if (PreheaderCount &&
-        (PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2))
+    if (PreheaderCount && (*PreheaderCount * 3) >= (*InstrCount * 2))
       continue;
   }

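The condition folded onto one line above encodes "average trip count <= 1.5" in integer arithmetic: the average is InstrCount / PreheaderCount, and multiplying both sides of InstrCount / PreheaderCount <= 3/2 by 2 * PreheaderCount yields 3 * PreheaderCount >= 2 * InstrCount, with no floating point and no division. A quick check with made-up counts:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t PreheaderCount = 100; // hypothetical: times the loop was entered
  uint64_t InstrCount = 140;     // hypothetical: times the profiled body ran
  // Average trip count 1.4 <= 1.5, so the skip condition fires:
  assert(PreheaderCount * 3 >= InstrCount * 2); // 300 >= 280
  InstrCount = 160; // average 1.6 > 1.5: the condition must not fire
  assert(PreheaderCount * 3 < InstrCount * 2);  // 300 < 320
  return 0;
}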
@@ -1619,7 +1619,7 @@ static void fixFuncEntryCount(PGOUseFunc &Func, LoopInfo &LI,
       continue;
     auto BFICount = NBFI.getBlockProfileCount(&BBI);
     CountValue = Func.getBBInfo(&BBI).CountValue;
-    BFICountValue = BFICount.getValue();
+    BFICountValue = *BFICount;
     SumCount.add(APFloat(CountValue * 1.0), APFloat::rmNearestTiesToEven);
     SumBFICount.add(APFloat(BFICountValue * 1.0), APFloat::rmNearestTiesToEven);
   }
@@ -1672,7 +1672,7 @@ static void verifyFuncBFI(PGOUseFunc &Func, LoopInfo &LI,
       NonZeroBBNum++;
     auto BFICount = NBFI.getBlockProfileCount(&BBI);
     if (BFICount)
-      BFICountValue = BFICount.getValue();
+      BFICountValue = *BFICount;

     if (HotBBOnly) {
       bool rawIsHot = CountValue >= HotCountThreshold;
@@ -1188,9 +1188,7 @@ bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
         canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
       const auto ClobberOff = MD->getClobberOffset(DepLoad);
       // GVN does not handle negative offsets.
-      Offset = (ClobberOff == None || ClobberOff.getValue() < 0)
-                   ? -1
-                   : ClobberOff.getValue();
+      Offset = (ClobberOff == None || *ClobberOff < 0) ? -1 : *ClobberOff;
     }
     if (Offset == -1)
       Offset =
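The GVN hunk combines the two Optional idioms in one expression: comparison against None tests for absence, and operator* unwraps; together they collapse the Optional into a -1 sentinel. A standalone restatement with std::optional (std::nullopt playing the role of llvm::None; the helper name is hypothetical):

#include <cassert>
#include <optional>

// Unusable clobber offsets (absent or negative) become the -1 sentinel,
// mirroring the logic in AnalyzeLoadAvailability above.
static int collapseOffset(std::optional<int> ClobberOff) {
  return (ClobberOff == std::nullopt || *ClobberOff < 0) ? -1 : *ClobberOff;
}

int main() {
  assert(collapseOffset(std::nullopt) == -1); // no known offset
  assert(collapseOffset(-8) == -1);           // negative offsets are rejected
  assert(collapseOffset(16) == 16);           // usable offsets pass through
  return 0;
}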
@@ -1414,7 +1414,7 @@ bool LoopConstrainer::run() {
     return false;
   }

-  SubRanges SR = MaybeSR.getValue();
+  SubRanges SR = *MaybeSR;
   bool Increasing = MainLoopStructure.IndVarIncreasing;
   IntegerType *IVTy =
       cast<IntegerType>(Range.getBegin()->getType());
@@ -1927,7 +1927,7 @@ bool InductiveRangeCheckElimination::run(
                << FailureReason << "\n";);
     return false;
   }
-  LoopStructure LS = MaybeLoopStructure.getValue();
+  LoopStructure LS = *MaybeLoopStructure;
   if (!isProfitableToTransform(*L, LS))
     return false;
   const SCEVAddRecExpr *IndVar =
@@ -777,7 +777,7 @@ unsigned LoopPredication::collectChecks(SmallVectorImpl<Value *> &Checks,
     if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) {
       if (auto NewRangeCheck = widenICmpRangeCheck(ICI, Expander,
                                                    Guard)) {
-        Checks.push_back(NewRangeCheck.getValue());
+        Checks.push_back(*NewRangeCheck);
         NumWidened++;
         continue;
       }
@@ -304,7 +304,7 @@ static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
     return None;
   }

-  return DeoptBundle.getValue().Inputs;
+  return DeoptBundle->Inputs;
 }

 /// Compute the live-in set for every basic block in the function
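Note that this hunk becomes DeoptBundle->Inputs, not *DeoptBundle.Inputs: postfix . binds tighter than prefix *, so the naive substitution would parse as *(DeoptBundle.Inputs) and fail to compile. When a member is accessed through the Optional, operator-> (or explicit parentheses) is the right spelling. A small demonstration with std::optional and a made-up Bundle type:

#include <cassert>
#include <optional>

struct Bundle { int Inputs; };

int main() {
  std::optional<Bundle> DeoptBundle = Bundle{7};
  int A = DeoptBundle->Inputs;   // member access through the optional
  int B = (*DeoptBundle).Inputs; // equivalent, parenthesized dereference
  // int C = *DeoptBundle.Inputs; // would not compile: *(DeoptBundle.Inputs)
  assert(A == B);
  return 0;
}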
@@ -222,7 +222,7 @@ static bool addDiscriminators(Function &F) {
                         << DIL->getColumn() << ":" << Discriminator << " "
                         << I << "\n");
     } else {
-      I.setDebugLoc(NewDIL.getValue());
+      I.setDebugLoc(*NewDIL);
       LLVM_DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":"
                         << DIL->getColumn() << ":" << Discriminator << " " << I
                         << "\n");
@@ -260,7 +260,7 @@ static bool addDiscriminators(Function &F) {
                         << CurrentDIL->getLine() << ":" << CurrentDIL->getColumn()
                         << ":" << Discriminator << " " << I << "\n");
     } else {
-      I.setDebugLoc(NewDIL.getValue());
+      I.setDebugLoc(*NewDIL);
       Changed = true;
     }
   }
@@ -485,7 +485,7 @@ bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,

     if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
       Optional<fp::ExceptionBehavior> ExBehavior = FPI->getExceptionBehavior();
-      return ExBehavior.getValue() != fp::ebStrict;
+      return *ExBehavior != fp::ebStrict;
     }
   }
@@ -513,7 +513,7 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
       if (const DILocation *DIL = I.getDebugLoc()) {
         auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(ULO.Count);
         if (NewDIL)
-          I.setDebugLoc(NewDIL.getValue());
+          I.setDebugLoc(*NewDIL);
         else
           LLVM_DEBUG(dbgs()
                      << "Failed to create new discriminator: "
@@ -356,7 +356,7 @@ llvm::UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
       if (const DILocation *DIL = I.getDebugLoc()) {
         auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(Count);
         if (NewDIL)
-          I.setDebugLoc(NewDIL.getValue());
+          I.setDebugLoc(*NewDIL);
         else
           LLVM_DEBUG(dbgs()
                      << "Failed to create new discriminator: "
@@ -144,7 +144,7 @@ void StackInfoBuilder::visit(Instruction &Inst) {

 uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
   auto DL = AI.getModule()->getDataLayout();
-  return AI.getAllocationSizeInBits(DL).getValue() / 8;
+  return *AI.getAllocationSizeInBits(DL) / 8;
 }

 void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Alignment) {
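One behavioral note on hunks like the one above that dereference without a prior emptiness check: for llvm::Optional at the time of this commit, getValue() and operator* are interchangeable, both assert in asserts builds when the Optional is empty, so the rewrite preserves behavior exactly. That differs from std::optional, where value() throws and an unchecked * is undefined behavior; a sketch of the standard-library contrast:

#include <iostream>
#include <optional>

int main() {
  std::optional<int> Empty;
  try {
    (void)Empty.value(); // std::optional: throws on an empty optional
  } catch (const std::bad_optional_access &) {
    std::cout << "value() threw as specified\n";
  }
  // (void)*Empty; // undefined behavior for std::optional;
  //               // llvm::Optional instead asserts in both forms
  return 0;
}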
@@ -933,7 +933,7 @@ void InnerLoopVectorizer::setDebugLocFromInst(
     auto NewDIL =
         DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
     if (NewDIL)
-      Builder.SetCurrentDebugLocation(NewDIL.getValue());
+      Builder.SetCurrentDebugLocation(*NewDIL);
     else
       LLVM_DEBUG(dbgs()
                  << "Failed to create new discriminator: "
@@ -5319,7 +5319,7 @@ VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
 #ifndef NDEBUG
     unsigned AssumedMinimumVscale = 1;
     if (Optional<unsigned> VScale = getVScaleForTuning())
-      AssumedMinimumVscale = VScale.getValue();
+      AssumedMinimumVscale = *VScale;
     unsigned Width =
         Candidate.Width.isScalable()
             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
@@ -5537,7 +5537,7 @@ LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
   if (MainLoopVF.isScalable()) {
     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
     if (Optional<unsigned> VScale = getVScaleForTuning())
-      EstimatedRuntimeVF *= VScale.getValue();
+      EstimatedRuntimeVF *= *VScale;
   }

   for (auto &NextVF : ProfitableVFs)
@@ -1597,7 +1597,7 @@ public:
     }

     if (BestOp.Idx) {
-      getData(BestOp.Idx.getValue(), Lane).IsUsed = IsUsed;
+      getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
       return BestOp.Idx;
     }
     // If we could not find a good match return None.
@@ -1938,7 +1938,7 @@ public:
       if (BestIdx) {
         // Swap the current operand with the one returned by
         // getBestOperand().
-        swap(OpIdx, BestIdx.getValue(), Lane);
+        swap(OpIdx, *BestIdx, Lane);
       } else {
         // We failed to find a best operand, set mode to 'Failed'.
         ReorderingModes[OpIdx] = ReorderingMode::Failed;
@@ -2557,7 +2557,7 @@ private:
       ScalarToTreeEntry[V] = Last;
     }
     // Update the scheduler bundle to point to this TreeEntry.
-    ScheduleData *BundleMember = Bundle.getValue();
+    ScheduleData *BundleMember = *Bundle;
     assert((BundleMember || isa<PHINode>(S.MainOp) ||
             isVectorLikeInstWithConstOps(S.MainOp) ||
             doesNotNeedToSchedule(VL)) &&
@@ -387,7 +387,7 @@ VPInstruction *VPlanSlp::buildGraph(ArrayRef<VPValue *> Values) {
     return markFailed();

   assert(getOpcode(Values) && "Opcodes for all values must match");
-  unsigned ValuesOpcode = getOpcode(Values).getValue();
+  unsigned ValuesOpcode = *getOpcode(Values);

   SmallVector<VPValue *, 4> CombinedOperands;
   if (areCommutative(Values)) {
@@ -538,7 +538,7 @@ void SourceCoverageViewHTML::renderLine(raw_ostream &OS, LineRef L,
   auto Highlight = [&](const std::string &Snippet, unsigned LC, unsigned RC) {
     if (getOptions().Debug)
       HighlightedRanges.emplace_back(LC, RC);
-    return tag("span", Snippet, std::string(Color.getValue()));
+    return tag("span", Snippet, std::string(*Color));
   };

   auto CheckIfUncovered = [&](const CoverageSegment *S) {
@@ -519,7 +519,7 @@ int main(int argc, char *argv[]) {
     // TODO: Remove OutputFormat flag in the next revision.
     WithColor::warning() << "--output-format option is deprecated, please use "
                             "--output-{FILE_FORMAT} options instead\n";
-    switch (Config.OutputFormat.getValue()) {
+    switch (*Config.OutputFormat) {
     case FileFormat::TBD: {
       std::error_code SysErr;
       raw_fd_ostream Out(*Config.Output, SysErr);
@@ -1066,7 +1066,7 @@ int main(int argc, char **argv) {
   CodeGen.setAttrs(codegen::getMAttrs());

   if (auto FT = codegen::getExplicitFileType())
-    CodeGen.setFileType(FT.getValue());
+    CodeGen.setFileType(*FT);

   if (!OutputFilename.empty()) {
     if (SaveLinkedModuleFile) {
@@ -241,7 +241,7 @@ static int run(int argc, char **argv) {
   Conf.Options = codegen::InitTargetOptionsFromCodeGenFlags(Triple());
   Conf.MAttrs = codegen::getMAttrs();
   if (auto RM = codegen::getExplicitRelocModel())
-    Conf.RelocModel = RM.getValue();
+    Conf.RelocModel = *RM;
   Conf.CodeModel = codegen::getExplicitCodeModel();

   Conf.DebugPassManager = DebugPassManager;
@@ -288,7 +288,7 @@ static int run(int argc, char **argv) {
   }

   if (auto FT = codegen::getExplicitFileType())
-    Conf.CGFileType = FT.getValue();
+    Conf.CGFileType = *FT;

   Conf.OverrideTriple = OverrideTriple;
   Conf.DefaultTriple = DefaultTriple;
@@ -2129,7 +2129,7 @@ void objdump::printSymbol(const ObjectFile *O, const SymbolRef &Symbol,
           dyn_cast<const XCOFFObjectFile>(O), Symbol);
   if (SymRef) {

-    Expected<StringRef> NameOrErr = SymRef.getValue().getName();
+    Expected<StringRef> NameOrErr = SymRef->getName();

     if (NameOrErr) {
       outs() << " (csect:";
@@ -2289,7 +2289,7 @@ static void printFaultMaps(const ObjectFile *Obj) {
   }

   StringRef FaultMapContents =
-      unwrapOrError(FaultMapSection.getValue().getContents(), Obj->getFileName());
+      unwrapOrError(FaultMapSection->getContents(), Obj->getFileName());
   FaultMapParser FMP(FaultMapContents.bytes_begin(),
                      FaultMapContents.bytes_end());
@@ -450,9 +450,7 @@ void dumpDebugLines(DWARFContext &DCtx, DWARFYAML::Data &Y) {

         default:
           for (uint8_t i = 0;
-               i <
-                   DebugLines.StandardOpcodeLengths.getValue()[NewOp.Opcode - 1];
-               ++i)
+               i < (*DebugLines.StandardOpcodeLengths)[NewOp.Opcode - 1]; ++i)
             NewOp.StandardOpcodeData.push_back(LineData.getULEB128(&Offset));
         }
       }