[llvm] Use value instead of getValue (NFC)

Author: Kazu Hirata
Date:   2022-07-13 23:11:56 -07:00
Commit: 611ffcf4e4 (parent: 6882ca9aff)
109 changed files with 482 additions and 501 deletions
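
The change is purely mechanical: llvm::Optional provides both the legacy getValue()/hasValue() accessors and the std::optional-style value()/has_value(), and this commit switches callers to the latter spelling with no functional change (NFC). A minimal before/after sketch follows; the helper function and its name are illustrative only and are not part of the patch:

#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Illustrative helper, not taken from the tree.
static void printProfileCount(llvm::Optional<uint64_t> ProfileCount) {
  if (ProfileCount) {
    // Before: llvm::errs() << ", count = " << ProfileCount.getValue();
    // After the rename, the same access uses the std::optional-style name:
    llvm::errs() << ", count = " << ProfileCount.value() << "\n";
  }
}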


@ -1278,9 +1278,9 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
}
LLVM_DEBUG(dbgs() << getBlockName(HeaderNode)
<< " has irr loop header weight "
<< HeaderWeight.getValue() << "\n");
<< HeaderWeight.value() << "\n");
NumHeadersWithWeight++;
uint64_t HeaderWeightValue = HeaderWeight.getValue();
uint64_t HeaderWeightValue = HeaderWeight.value();
if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
MinHeaderWeight = HeaderWeightValue;
if (HeaderWeightValue) {
@ -1732,10 +1732,10 @@ raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
if (Optional<uint64_t> ProfileCount =
BlockFrequencyInfoImplBase::getBlockProfileCount(
F->getFunction(), getNode(&BB)))
OS << ", count = " << ProfileCount.getValue();
OS << ", count = " << ProfileCount.value();
if (Optional<uint64_t> IrrLoopHeaderWeight =
BB.getIrrLoopHeaderWeight())
OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.getValue();
OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.value();
OS << "\n";
}


@ -236,10 +236,10 @@ class VFDatabase {
// ensuring that the variant described in the attribute has a
// corresponding definition or declaration of the vector
// function in the Module M.
if (Shape && (Shape.getValue().ScalarName == ScalarName)) {
assert(CI.getModule()->getFunction(Shape.getValue().VectorName) &&
if (Shape && (Shape.value().ScalarName == ScalarName)) {
assert(CI.getModule()->getFunction(Shape.value().VectorName) &&
"Vector function is missing.");
Mappings.push_back(Shape.getValue());
Mappings.push_back(Shape.value());
}
}
}


@ -386,12 +386,12 @@ private:
const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);
if (Op.isLiteral())
EmitAbbreviatedLiteral(Op, Code.getValue());
EmitAbbreviatedLiteral(Op, Code.value());
else {
assert(Op.getEncoding() != BitCodeAbbrevOp::Array &&
Op.getEncoding() != BitCodeAbbrevOp::Blob &&
"Expected literal or scalar");
EmitAbbreviatedField(Op, Code.getValue());
EmitAbbreviatedField(Op, Code.value());
}
}


@ -694,7 +694,7 @@ bool InstructionSelector::executeMatchTable(
(ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
State.MIs[InsnID]->getOperand(OpIdx));
if (Renderer)
State.Renderers[RendererID] = Renderer.getValue();
State.Renderers[RendererID] = Renderer.value();
else
if (handleReject() == RejectAndGiveUp)
return false;


@ -1162,11 +1162,11 @@ private:
RoundingMode UseRounding = DefaultConstrainedRounding;
if (Rounding)
UseRounding = Rounding.getValue();
UseRounding = Rounding.value();
Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding);
assert(RoundingStr && "Garbage strict rounding mode!");
auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
auto *RoundingMDS = MDString::get(Context, RoundingStr.value());
return MetadataAsValue::get(Context, RoundingMDS);
}
@ -1175,11 +1175,11 @@ private:
fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
if (Except)
UseExcept = Except.getValue();
UseExcept = Except.value();
Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept);
assert(ExceptStr && "Garbage strict exception behavior!");
auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
auto *ExceptMDS = MDString::get(Context, ExceptStr.value());
return MetadataAsValue::get(Context, ExceptMDS);
}


@ -89,7 +89,7 @@ public:
bool hasImportModule() const { return ImportModule.has_value(); }
StringRef getImportModule() const {
if (ImportModule)
return ImportModule.getValue();
return ImportModule.value();
// Use a default module name of "env" for now, for compatibility with
// existing tools.
// TODO(sbc): Find a way to specify a default value in the object format
@ -101,13 +101,13 @@ public:
bool hasImportName() const { return ImportName.has_value(); }
StringRef getImportName() const {
if (ImportName)
return ImportName.getValue();
return ImportName.value();
return getName();
}
void setImportName(StringRef Name) { ImportName = Name; }
bool hasExportName() const { return ExportName.has_value(); }
StringRef getExportName() const { return ExportName.getValue(); }
StringRef getExportName() const { return ExportName.value(); }
void setExportName(StringRef Name) { ExportName = Name; }
bool isFunctionTable() const {
@ -130,14 +130,14 @@ public:
const wasm::WasmGlobalType &getGlobalType() const {
assert(GlobalType);
return GlobalType.getValue();
return GlobalType.value();
}
void setGlobalType(wasm::WasmGlobalType GT) { GlobalType = GT; }
bool hasTableType() const { return TableType.has_value(); }
const wasm::WasmTableType &getTableType() const {
assert(hasTableType());
return TableType.getValue();
return TableType.value();
}
void setTableType(wasm::WasmTableType TT) { TableType = TT; }
void setTableType(wasm::ValType VT) {


@ -40,7 +40,7 @@ public:
XCOFF::StorageClass getStorageClass() const {
assert(StorageClass && "StorageClass not set on XCOFF MCSymbol.");
return StorageClass.getValue();
return StorageClass.value();
}
StringRef getUnqualifiedName() const { return getUnqualifiedName(getName()); }


@ -638,9 +638,7 @@ template <typename T, typename Enable = void> struct ValueIsPresent {
template <typename T> struct ValueIsPresent<Optional<T>> {
using UnwrappedType = T;
static inline bool isPresent(const Optional<T> &t) { return t.has_value(); }
static inline decltype(auto) unwrapValue(Optional<T> &t) {
return t.getValue();
}
static inline decltype(auto) unwrapValue(Optional<T> &t) { return t.value(); }
};
// If something is "nullable" then we just compare it to nullptr to see if it


@ -1270,7 +1270,7 @@ public:
assert(Err && "Trying to log after takeError().");
OS << "'" << FileName << "': ";
if (Line)
OS << "line " << Line.getValue() << ": ";
OS << "line " << Line.value() << ": ";
Err->log(OS);
}


@ -828,9 +828,8 @@ void BranchProbabilityInfo::computeEestimateBlockWeight(
if (auto BBWeight = getInitialEstimatedBlockWeight(BB))
// If we were able to find estimated weight for the block set it to this
// block and propagate up the IR.
propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT,
BBWeight.getValue(), BlockWorkList,
LoopWorkList);
propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, BBWeight.value(),
BlockWorkList, LoopWorkList);
// BlockWorklist/LoopWorkList contains blocks/loops with at least one
// successor/exit having estimated weight. Try to propagate weight to such


@ -184,7 +184,7 @@ CmpInst::Predicate IRInstructionData::getPredicate() const {
"Can only get a predicate from a compare instruction");
if (RevisedPredicate)
return RevisedPredicate.getValue();
return RevisedPredicate.value();
return cast<CmpInst>(Inst)->getPredicate();
}


@ -708,7 +708,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
assert(BFI && "BFI must be available");
auto ProfileCount = BFI->getBlockProfileCount(BB);
assert(ProfileCount);
if (ProfileCount.getValue() == 0)
if (ProfileCount.value() == 0)
ColdSize += Cost - CostAtBBStart;
}
@ -833,7 +833,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
assert(ProfileCount);
CurrentSavings *= ProfileCount.getValue();
CurrentSavings *= ProfileCount.value();
CycleSavings += CurrentSavings;
}
@ -1787,12 +1787,12 @@ void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
// return min(A, B) if B is valid.
auto MinIfValid = [](int A, Optional<int> B) {
return B ? std::min(A, B.getValue()) : A;
return B ? std::min(A, B.value()) : A;
};
// return max(A, B) if B is valid.
auto MaxIfValid = [](int A, Optional<int> B) {
return B ? std::max(A, B.getValue()) : A;
return B ? std::max(A, B.value()) : A;
};
// Various bonus percentages. These are multiplied by Threshold to get the


@ -6111,8 +6111,8 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
Value *Op2 = Call->getArgOperand(2);
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q,
FPI->getExceptionBehavior().getValue(),
FPI->getRoundingMode().getValue()))
FPI->getExceptionBehavior().value(),
FPI->getRoundingMode().value()))
return V;
return nullptr;
}
@ -6176,38 +6176,33 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
}
case Intrinsic::experimental_constrained_fadd: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
return simplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
FPI->getFastMathFlags(), Q,
FPI->getExceptionBehavior().getValue(),
FPI->getRoundingMode().getValue());
return simplifyFAddInst(
FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_fsub: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
return simplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
FPI->getFastMathFlags(), Q,
FPI->getExceptionBehavior().getValue(),
FPI->getRoundingMode().getValue());
return simplifyFSubInst(
FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_fmul: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
return simplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
FPI->getFastMathFlags(), Q,
FPI->getExceptionBehavior().getValue(),
FPI->getRoundingMode().getValue());
return simplifyFMulInst(
FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_fdiv: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
return simplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
FPI->getFastMathFlags(), Q,
FPI->getExceptionBehavior().getValue(),
FPI->getRoundingMode().getValue());
return simplifyFDivInst(
FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
case Intrinsic::experimental_constrained_frem: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
return simplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
FPI->getFastMathFlags(), Q,
FPI->getExceptionBehavior().getValue(),
FPI->getRoundingMode().getValue());
return simplifyFRemInst(
FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value());
}
default:
return nullptr;


@ -921,7 +921,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
if (!LHSRes)
// More work to do before applying this transfer rule.
return None;
const ConstantRange &LHSRange = LHSRes.getValue();
const ConstantRange &LHSRange = LHSRes.value();
const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
@ -946,8 +946,8 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
// More work to do before applying this transfer rule.
return None;
const ConstantRange &LHSRange = LHSRes.getValue();
const ConstantRange &RHSRange = RHSRes.getValue();
const ConstantRange &LHSRange = LHSRes.value();
const ConstantRange &RHSRange = RHSRes.value();
return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
}


@ -501,10 +501,10 @@ Optional<StringRef> llvm::getAllocationFamily(const Value *I,
return None;
const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
if (AllocData)
return mangledNameForMallocFamily(AllocData.getValue().Family);
return mangledNameForMallocFamily(AllocData.value().Family);
const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
if (FreeData)
return mangledNameForMallocFamily(FreeData.getValue().Family);
return mangledNameForMallocFamily(FreeData.value().Family);
return None;
}


@ -493,7 +493,7 @@ static V getOrCreateCachedOptional(K Key, DenseMap<K, Optional<V>> &Map,
Optional<V> &OptVal = Map[Key];
if (!OptVal)
OptVal = Fn(std::forward<ArgsTy>(args)...);
return OptVal.getValue();
return OptVal.value();
}
const BasicBlock *


@ -279,19 +279,19 @@ ProfileSummaryInfo::computeThreshold(int PercentileCutoff) const {
}
bool ProfileSummaryInfo::hasHugeWorkingSetSize() const {
return HasHugeWorkingSetSize && HasHugeWorkingSetSize.getValue();
return HasHugeWorkingSetSize && HasHugeWorkingSetSize.value();
}
bool ProfileSummaryInfo::hasLargeWorkingSetSize() const {
return HasLargeWorkingSetSize && HasLargeWorkingSetSize.getValue();
return HasLargeWorkingSetSize && HasLargeWorkingSetSize.value();
}
bool ProfileSummaryInfo::isHotCount(uint64_t C) const {
return HotCountThreshold && C >= HotCountThreshold.getValue();
return HotCountThreshold && C >= HotCountThreshold.value();
}
bool ProfileSummaryInfo::isColdCount(uint64_t C) const {
return ColdCountThreshold && C <= ColdCountThreshold.getValue();
return ColdCountThreshold && C <= ColdCountThreshold.value();
}
template <bool isHot>
@ -299,9 +299,9 @@ bool ProfileSummaryInfo::isHotOrColdCountNthPercentile(int PercentileCutoff,
uint64_t C) const {
auto CountThreshold = computeThreshold(PercentileCutoff);
if (isHot)
return CountThreshold && C >= CountThreshold.getValue();
return CountThreshold && C >= CountThreshold.value();
else
return CountThreshold && C <= CountThreshold.getValue();
return CountThreshold && C <= CountThreshold.value();
}
bool ProfileSummaryInfo::isHotCountNthPercentile(int PercentileCutoff,


@ -4838,7 +4838,7 @@ public:
Optional<const SCEV *> Res =
compareWithBackedgeCondition(SI->getCondition());
if (Res) {
bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
bool IsOne = cast<SCEVConstant>(Res.value())->getValue()->isOne();
Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
}
break;
@ -4846,7 +4846,7 @@ public:
default: {
Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
if (Res)
Result = Res.getValue();
Result = Res.value();
break;
}
}
@ -6586,8 +6586,8 @@ ScalarEvolution::getRangeRef(const SCEV *S,
// Check if the IR explicitly contains !range metadata.
Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
if (MDRange)
ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
RangeType);
ConservativeResult =
ConservativeResult.intersectWith(MDRange.value(), RangeType);
// Use facts about recurrences in the underlying IR. Note that add
// recurrences are AddRecExprs and thus don't hit this path. This
@ -10632,7 +10632,7 @@ ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
assert(ResultSwapped && "should be able to analyze both!");
assert(ResultSwapped.getValue() != Result.getValue() &&
assert(ResultSwapped.value() != Result.value() &&
"monotonicity should flip as we flip the predicate");
}
#endif


@ -1502,7 +1502,7 @@ void VFABI::getVectorVariantNames(
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
assert(Info && "Invalid name for a VFABI variant.");
assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
assert(CI.getModule()->getFunction(Info.value().VectorName) &&
"Vector function is missing.");
#endif
VariantMappings.push_back(std::string(S));


@ -742,7 +742,7 @@ bool MIParser::parseBasicBlockDefinition(
MBB->setIsInlineAsmBrIndirectTarget(IsInlineAsmBrIndirectTarget);
MBB->setIsEHFuncletEntry(IsEHFuncletEntry);
if (SectionID) {
MBB->setSectionID(SectionID.getValue());
MBB->setSectionID(SectionID.value());
MF.setBBSectionsType(BasicBlockSection::List);
}
return false;


@ -451,7 +451,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (IrrLoopHeaderWeight && IsStandalone) {
if (Indexes) OS << '\t';
OS.indent(2) << "; Irreducible loop header weight: "
<< IrrLoopHeaderWeight.getValue() << '\n';
<< IrrLoopHeaderWeight.value() << '\n';
}
}


@ -106,8 +106,8 @@ bool MachineFunctionSplitter::runOnMachineFunction(MachineFunction &MF) {
// We don't want to proceed further for cold functions
// or functions of unknown hotness. Lukewarm functions have no prefix.
Optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
if (SectionPrefix && (SectionPrefix.getValue().equals("unlikely") ||
SectionPrefix.getValue().equals("unknown"))) {
if (SectionPrefix && (SectionPrefix.value().equals("unlikely") ||
SectionPrefix.value().equals("unknown"))) {
return false;
}


@ -1448,7 +1448,7 @@ Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
const TargetRegisterClass *RC) {
// If the init register is not undef, try and find an existing phi.
if (InitReg) {
auto I = Phis.find({LoopReg, InitReg.getValue()});
auto I = Phis.find({LoopReg, InitReg.value()});
if (I != Phis.end())
return I->second;
} else {
@ -1469,10 +1469,10 @@ Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
return R;
// Found a phi taking undef as input, so rewrite it to take InitReg.
MachineInstr *MI = MRI.getVRegDef(R);
MI->getOperand(1).setReg(InitReg.getValue());
Phis.insert({{LoopReg, InitReg.getValue()}, R});
MI->getOperand(1).setReg(InitReg.value());
Phis.insert({{LoopReg, InitReg.value()}, R});
const TargetRegisterClass *ConstrainRegClass =
MRI.constrainRegClass(R, MRI.getRegClass(InitReg.getValue()));
MRI.constrainRegClass(R, MRI.getRegClass(InitReg.value()));
assert(ConstrainRegClass && "Expected a valid constrained register class!");
(void)ConstrainRegClass;
UndefPhis.erase(I);


@ -870,8 +870,8 @@ bool SelectOptimize::computeLoopCosts(
ORE->emit(ORmissL);
return false;
}
IPredCost += Scaled64::get(ILatency.getValue());
INonPredCost += Scaled64::get(ILatency.getValue());
IPredCost += Scaled64::get(ILatency.value());
INonPredCost += Scaled64::get(ILatency.value());
// For a select that can be converted to branch,
// compute its cost as a branch (non-predicated cost).


@ -703,7 +703,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
unsigned NumRegs;
if (IsABIRegCopy) {
NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
*DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
*DAG.getContext(), CallConv.value(), ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
} else {
NumRegs =
@ -800,11 +800,11 @@ RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
for (EVT ValueVT : ValueVTs) {
unsigned NumRegs =
isABIMangled()
? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
? TLI.getNumRegistersForCallingConv(Context, CC.value(), ValueVT)
: TLI.getNumRegisters(Context, ValueVT);
MVT RegisterVT =
isABIMangled()
? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
? TLI.getRegisterTypeForCallingConv(Context, CC.value(), ValueVT)
: TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
@ -831,9 +831,9 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
unsigned NumRegs = RegCount[Value];
MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(),
CallConv.getValue(), RegVTs[Value])
MVT RegisterVT =
isABIMangled() ? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(), CallConv.value(), RegVTs[Value])
: RegVTs[Value];
Parts.resize(NumRegs);
@ -914,9 +914,9 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumParts = RegCount[Value];
MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(),
CallConv.getValue(), RegVTs[Value])
MVT RegisterVT =
isABIMangled() ? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(), CallConv.value(), RegVTs[Value])
: RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
@ -8791,7 +8791,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
if (RegError) {
const MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const char *RegName = TRI.getName(RegError.getValue());
const char *RegName = TRI.getName(RegError.value());
emitInlineAsmError(Call, "register '" + Twine(RegName) +
"' allocated for constraint '" +
Twine(OpInfo.ConstraintCode) +


@ -531,14 +531,14 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
for (const Value *V : SI.Bases) {
auto Opt = S.isGCManagedPointer(V->getType()->getScalarType());
if (Opt) {
assert(Opt.getValue() &&
assert(Opt.value() &&
"non gc managed base pointer found in statepoint");
}
}
for (const Value *V : SI.Ptrs) {
auto Opt = S.isGCManagedPointer(V->getType()->getScalarType());
if (Opt) {
assert(Opt.getValue() &&
assert(Opt.value() &&
"non gc managed derived pointer found in statepoint");
}
}


@ -1995,9 +1995,9 @@ bool TargetLowering::SimplifyDemandedBits(
KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
Known = KnownBits::umin(Known0, Known1);
if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1))
return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
return TLO.CombineTo(Op, IsULE.value() ? Op0 : Op1);
if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1))
return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);
return TLO.CombineTo(Op, IsULT.value() ? Op0 : Op1);
break;
}
case ISD::UMAX: {
@ -2008,9 +2008,9 @@ bool TargetLowering::SimplifyDemandedBits(
KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
Known = KnownBits::umax(Known0, Known1);
if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1))
return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1);
return TLO.CombineTo(Op, IsUGE.value() ? Op0 : Op1);
if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1))
return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1);
return TLO.CombineTo(Op, IsUGT.value() ? Op0 : Op1);
break;
}
case ISD::BITREVERSE: {


@ -1205,13 +1205,13 @@ void DWARFContext::addLocalsForDie(DWARFCompileUnit *CU, DWARFDie Subprogram,
if (auto DeclFileAttr = Die.find(DW_AT_decl_file)) {
if (const auto *LT = CU->getContext().getLineTableForUnit(CU))
LT->getFileNameByIndex(
DeclFileAttr->getAsUnsignedConstant().getValue(),
DeclFileAttr->getAsUnsignedConstant().value(),
CU->getCompilationDir(),
DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath,
Local.DeclFile);
}
if (auto DeclLineAttr = Die.find(DW_AT_decl_line))
Local.DeclLine = DeclLineAttr->getAsUnsignedConstant().getValue();
Local.DeclLine = DeclLineAttr->getAsUnsignedConstant().value();
Result.push_back(Local);
return;


@ -327,20 +327,20 @@ parseV5DirFileTables(const DWARFDataExtractor &DebugLineData,
FileEntry.Source = Value;
break;
case DW_LNCT_directory_index:
FileEntry.DirIdx = Value.getAsUnsignedConstant().getValue();
FileEntry.DirIdx = Value.getAsUnsignedConstant().value();
break;
case DW_LNCT_timestamp:
FileEntry.ModTime = Value.getAsUnsignedConstant().getValue();
FileEntry.ModTime = Value.getAsUnsignedConstant().value();
break;
case DW_LNCT_size:
FileEntry.Length = Value.getAsUnsignedConstant().getValue();
FileEntry.Length = Value.getAsUnsignedConstant().value();
break;
case DW_LNCT_MD5:
if (!Value.getAsBlock() || Value.getAsBlock().getValue().size() != 16)
if (!Value.getAsBlock() || Value.getAsBlock().value().size() != 16)
return createStringError(
errc::invalid_argument,
"failed to parse file entry because the MD5 hash is invalid");
std::uninitialized_copy_n(Value.getAsBlock().getValue().begin(), 16,
std::uninitialized_copy_n(Value.getAsBlock().value().begin(), 16,
FileEntry.Checksum.begin());
break;
default:


@ -373,7 +373,7 @@ Error DebuginfodCollection::findBinaries(StringRef Path) {
if (!ID)
continue;
std::string IDString = buildIDToString(ID.getValue());
std::string IDString = buildIDToString(ID.value());
if (isDebugBinary(Object)) {
std::lock_guard<sys::RWMutex> DebugBinariesGuard(DebugBinariesMutex);
DebugBinaries[IDString] = FilePath;
@ -435,7 +435,7 @@ Expected<std::string> DebuginfodCollection::findBinaryPath(BuildIDRef ID) {
}
}
if (Path)
return Path.getValue();
return Path.value();
}
// Try federation.
@ -466,7 +466,7 @@ Expected<std::string> DebuginfodCollection::findDebugBinaryPath(BuildIDRef ID) {
}
}
if (Path)
return Path.getValue();
return Path.value();
// Try federation.
return getCachedOrDownloadDebuginfo(ID);


@ -214,7 +214,7 @@ static int isVariantApplicableInContextHelper(
Optional<bool> Result = HandleTrait(Property, IsActiveTrait);
if (Result)
return Result.getValue();
return Result.value();
}
if (!DeviceSetOnly) {
@ -235,7 +235,7 @@ static int isVariantApplicableInContextHelper(
Optional<bool> Result = HandleTrait(Property, FoundInOrder);
if (Result)
return Result.getValue();
return Result.value();
if (!FoundInOrder) {
LLVM_DEBUG(dbgs() << "[" << DEBUG_TYPE << "] Construct property "


@ -4427,10 +4427,9 @@ MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
assert(SI.getNumSuccessors() == Weights->size() &&
"num of prof branch_weights must accord with num of successors");
bool AllZeroes =
all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });
bool AllZeroes = all_of(Weights.value(), [](uint32_t W) { return W == 0; });
if (AllZeroes || Weights.getValue().size() < 2)
if (AllZeroes || Weights.value().size() < 2)
return nullptr;
return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
@ -4464,8 +4463,8 @@ SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
// Copy the last case to the place of the removed one and shrink.
// This is tightly coupled with the way SwitchInst::removeCase() removes
// the cases in SwitchInst::removeCase(CaseIt).
Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
Weights.getValue().pop_back();
Weights.value()[I->getCaseIndex() + 1] = Weights.value().back();
Weights.value().pop_back();
}
return SI.removeCase(I);
}
@ -4478,10 +4477,10 @@ void SwitchInstProfUpdateWrapper::addCase(
if (!Weights && W && *W) {
Changed = true;
Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
Weights.value()[SI.getNumSuccessors() - 1] = *W;
} else if (Weights) {
Changed = true;
Weights.getValue().push_back(W.value_or(0));
Weights.value().push_back(W.value_or(0));
}
if (Weights)
assert(SI.getNumSuccessors() == Weights->size() &&


@ -223,13 +223,13 @@ ConstrainedFPIntrinsic::getExceptionBehavior() const {
bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
if (Except) {
if (Except.getValue() != fp::ebIgnore)
if (Except.value() != fp::ebIgnore)
return false;
}
Optional<RoundingMode> Rounding = getRoundingMode();
if (Rounding) {
if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
if (Rounding.value() != RoundingMode::NearestTiesToEven)
return false;
}
@ -364,13 +364,13 @@ VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
MaybeAlign VPIntrinsic::getPointerAlignment() const {
Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
assert(PtrParamOpt && "no pointer argument!");
return getParamAlign(PtrParamOpt.getValue());
return getParamAlign(PtrParamOpt.value());
}
/// \return The pointer operand of this load,store, gather or scatter.
Value *VPIntrinsic::getMemoryPointerParam() const {
if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
return getArgOperand(PtrParamOpt.getValue());
return getArgOperand(PtrParamOpt.value());
return nullptr;
}
@ -391,7 +391,7 @@ Value *VPIntrinsic::getMemoryDataParam() const {
auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
if (!DataParamOpt)
return nullptr;
return getArgOperand(DataParamOpt.getValue());
return getArgOperand(DataParamOpt.value());
}
Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {


@ -259,7 +259,7 @@ bool LLVMContextImpl::getOpaquePointers() {
}
void LLVMContextImpl::setOpaquePointers(bool OP) {
assert((!OpaquePointers || OpaquePointers.getValue() == OP) &&
assert((!OpaquePointers || OpaquePointers.value() == OP) &&
"Cannot change opaque pointers mode once set");
OpaquePointers = OP;
}


@ -202,8 +202,8 @@ Error ifs::writeIFSToOutputStream(raw_ostream &OS, const IFSStub &Stub) {
yaml::Output YamlOut(OS, nullptr, /*WrapColumn =*/0);
std::unique_ptr<IFSStubTriple> CopyStub(new IFSStubTriple(Stub));
if (Stub.Target.Arch) {
CopyStub->Target.ArchString = std::string(
ELF::convertEMachineToArchName(Stub.Target.Arch.getValue()));
CopyStub->Target.ArchString =
std::string(ELF::convertEMachineToArchName(Stub.Target.Arch.value()));
}
IFSTarget Target = Stub.Target;
@ -222,36 +222,35 @@ Error ifs::overrideIFSTarget(IFSStub &Stub, Optional<IFSArch> OverrideArch,
Optional<std::string> OverrideTriple) {
std::error_code OverrideEC(1, std::generic_category());
if (OverrideArch) {
if (Stub.Target.Arch &&
Stub.Target.Arch.getValue() != OverrideArch.getValue()) {
if (Stub.Target.Arch && Stub.Target.Arch.value() != OverrideArch.value()) {
return make_error<StringError>(
"Supplied Arch conflicts with the text stub", OverrideEC);
}
Stub.Target.Arch = OverrideArch.getValue();
Stub.Target.Arch = OverrideArch.value();
}
if (OverrideEndianness) {
if (Stub.Target.Endianness &&
Stub.Target.Endianness.getValue() != OverrideEndianness.getValue()) {
Stub.Target.Endianness.value() != OverrideEndianness.value()) {
return make_error<StringError>(
"Supplied Endianness conflicts with the text stub", OverrideEC);
}
Stub.Target.Endianness = OverrideEndianness.getValue();
Stub.Target.Endianness = OverrideEndianness.value();
}
if (OverrideBitWidth) {
if (Stub.Target.BitWidth &&
Stub.Target.BitWidth.getValue() != OverrideBitWidth.getValue()) {
Stub.Target.BitWidth.value() != OverrideBitWidth.value()) {
return make_error<StringError>(
"Supplied BitWidth conflicts with the text stub", OverrideEC);
}
Stub.Target.BitWidth = OverrideBitWidth.getValue();
Stub.Target.BitWidth = OverrideBitWidth.value();
}
if (OverrideTriple) {
if (Stub.Target.Triple &&
Stub.Target.Triple.getValue() != OverrideTriple.getValue()) {
Stub.Target.Triple.value() != OverrideTriple.value()) {
return make_error<StringError>(
"Supplied Triple conflicts with the text stub", OverrideEC);
}
Stub.Target.Triple = OverrideTriple.getValue();
Stub.Target.Triple = OverrideTriple.value();
}
return Error::success();
}


@ -773,7 +773,7 @@ MCSectionXCOFF *MCContext::getXCOFFSection(
// Do the lookup. If we have a hit, return it.
auto IterBool = XCOFFUniquingMap.insert(std::make_pair(
IsDwarfSec
? XCOFFSectionKey(Section.str(), DwarfSectionSubtypeFlags.getValue())
? XCOFFSectionKey(Section.str(), DwarfSectionSubtypeFlags.value())
: XCOFFSectionKey(Section.str(), CsectProp->MappingClass),
nullptr));
auto &Entry = *IterBool.first;
@ -806,7 +806,7 @@ MCSectionXCOFF *MCContext::getXCOFFSection(
if (IsDwarfSec)
Result = new (XCOFFAllocator.Allocate())
MCSectionXCOFF(QualName->getUnqualifiedName(), Kind, QualName,
DwarfSectionSubtypeFlags.getValue(), Begin, CachedName,
DwarfSectionSubtypeFlags.value(), Begin, CachedName,
MultiSymbolsAllowed);
else
Result = new (XCOFFAllocator.Allocate())


@ -88,8 +88,8 @@ bool XCOFFSymbolInfo::operator<(const XCOFFSymbolInfo &SymInfo) const {
return SymInfo.StorageMappingClass.has_value();
if (StorageMappingClass) {
return getSMCPriority(StorageMappingClass.getValue()) <
getSMCPriority(SymInfo.StorageMappingClass.getValue());
return getSMCPriority(StorageMappingClass.value()) <
getSMCPriority(SymInfo.StorageMappingClass.value());
}
return false;


@ -4213,7 +4213,7 @@ bool MasmParser::parseStructInitializer(const StructInfo &Structure,
size_t FieldIndex = 0;
if (EndToken) {
// Initialize all fields with given initializers.
while (getTok().isNot(EndToken.getValue()) &&
while (getTok().isNot(EndToken.value()) &&
FieldIndex < Structure.Fields.size()) {
const FieldInfo &Field = Structure.Fields[FieldIndex++];
if (parseOptionalToken(AsmToken::Comma)) {
@ -4245,10 +4245,10 @@ bool MasmParser::parseStructInitializer(const StructInfo &Structure,
}
if (EndToken) {
if (EndToken.getValue() == AsmToken::Greater)
if (EndToken.value() == AsmToken::Greater)
return parseAngleBracketClose();
return parseToken(EndToken.getValue());
return parseToken(EndToken.value());
}
return false;


@ -96,10 +96,10 @@ MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
continue;
unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
double Temp = NumUnits * 1.0 / I->Cycles;
Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp;
Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp;
}
if (Throughput)
return 1.0 / Throughput.getValue();
return 1.0 / Throughput.value();
// If no throughput value was calculated, assume that we can execute at the
// maximum issue width scaled by number of micro-ops for the schedule class.
@ -140,10 +140,10 @@ MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
if (!I->getCycles())
continue;
double Temp = countPopulation(I->getUnits()) * 1.0 / I->getCycles();
Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp;
Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp;
}
if (Throughput)
return 1.0 / Throughput.getValue();
return 1.0 / Throughput.value();
// If there are no execution resources specified for this class, then assume
// that it can execute at the maximum default issue width.


@ -110,8 +110,8 @@ void MCSectionXCOFF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
// XCOFF debug sections.
if (getKind().isMetadata() && isDwarfSect()) {
OS << "\n\t.dwsect "
<< format("0x%" PRIx32, getDwarfSubtypeFlags().getValue()) << '\n';
OS << "\n\t.dwsect " << format("0x%" PRIx32, getDwarfSubtypeFlags().value())
<< '\n';
OS << MAI.getPrivateLabelPrefix() << getName() << ':' << '\n';
return;
}


@ -600,8 +600,8 @@ handleUserSection(const NewSectionInfo &NewSection,
static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig,
Object &Obj) {
if (Config.OutputArch) {
Obj.Machine = Config.OutputArch.getValue().EMachine;
Obj.OSABI = Config.OutputArch.getValue().OSABI;
Obj.Machine = Config.OutputArch.value().EMachine;
Obj.OSABI = Config.OutputArch.value().OSABI;
}
if (!Config.SplitDWO.empty() && Config.ExtractDWO) {
@ -699,7 +699,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig,
const SectionRename &SR = Iter->second;
Sec.Name = std::string(SR.NewName);
if (SR.NewFlags)
setSectionFlagsAndType(Sec, SR.NewFlags.getValue());
setSectionFlagsAndType(Sec, SR.NewFlags.value());
RenamedSections.insert(&Sec);
} else if (RelocSec && !(Sec.Flags & SHF_ALLOC))
// Postpone processing relocation sections which are not specified in
@ -811,7 +811,7 @@ Error objcopy::elf::executeObjcopyOnBinary(const CommonConfig &Config,
return Obj.takeError();
// Prefer OutputArch (-O<format>) if set, otherwise infer it from the input.
const ElfType OutputElfType =
Config.OutputArch ? getOutputElfType(Config.OutputArch.getValue())
Config.OutputArch ? getOutputElfType(Config.OutputArch.value())
: getOutputElfType(In);
if (Error E = handleArgs(Config, ELFConfig, **Obj))


@ -168,11 +168,11 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
Optional<unsigned> Attr =
Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch);
if (Attr)
isV7 = Attr.getValue() == ARMBuildAttrs::v7;
isV7 = Attr.value() == ARMBuildAttrs::v7;
Attr = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
case ARMBuildAttrs::ApplicationProfile:
Features.AddFeature("aclass");
break;
@ -191,7 +191,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
Attr = Attributes.getAttributeValue(ARMBuildAttrs::THUMB_ISA_use);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@ -206,7 +206,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
Attr = Attributes.getAttributeValue(ARMBuildAttrs::FP_arch);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@ -230,7 +230,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
Attr = Attributes.getAttributeValue(ARMBuildAttrs::Advanced_SIMD_arch);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@ -249,7 +249,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
Attr = Attributes.getAttributeValue(ARMBuildAttrs::MVE_arch);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@ -268,7 +268,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
Attr = Attributes.getAttributeValue(ARMBuildAttrs::DIV_use);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
default:
break;
case ARMBuildAttrs::DisallowDIV:
@ -524,7 +524,7 @@ void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const {
Optional<unsigned> Attr =
Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch);
if (Attr) {
switch (Attr.getValue()) {
switch (Attr.value()) {
case ARMBuildAttrs::v4:
Triple += "v4";
break;
@ -556,7 +556,7 @@ void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const {
Optional<unsigned> ArchProfileAttr =
Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile);
if (ArchProfileAttr &&
ArchProfileAttr.getValue() == ARMBuildAttrs::MicroControllerProfile)
ArchProfileAttr.value() == ARMBuildAttrs::MicroControllerProfile)
Triple += "v7m";
else
Triple += "v7";


@ -133,17 +133,17 @@ void DXContainerWriter::writeParts(raw_ostream &OS) {
// Compute the optional fields if needed...
if (P.Program->DXILOffset)
Header.Bitcode.Offset = P.Program->DXILOffset.getValue();
Header.Bitcode.Offset = P.Program->DXILOffset.value();
else
Header.Bitcode.Offset = sizeof(dxbc::BitcodeHeader);
if (P.Program->DXILSize)
Header.Bitcode.Size = P.Program->DXILSize.getValue();
Header.Bitcode.Size = P.Program->DXILSize.value();
else
Header.Bitcode.Size = P.Program->DXIL ? P.Program->DXIL->size() : 0;
if (P.Program->Size)
Header.Size = P.Program->Size.getValue();
Header.Size = P.Program->Size.value();
else
Header.Size = sizeof(dxbc::ProgramHeader) + Header.Bitcode.Size;


@ -47,7 +47,7 @@ Optional<std::string> Process::FindInEnvPath(StringRef EnvName,
const char EnvPathSeparatorStr[] = {Separator, '\0'};
SmallVector<StringRef, 8> Dirs;
SplitString(OptPath.getValue(), Dirs, EnvPathSeparatorStr);
SplitString(OptPath.value(), Dirs, EnvPathSeparatorStr);
for (StringRef Dir : Dirs) {
if (Dir.empty())


@ -2669,13 +2669,13 @@ void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
" 'version': 0,\n";
if (IsCaseSensitive)
OS << " 'case-sensitive': '"
<< (IsCaseSensitive.getValue() ? "true" : "false") << "',\n";
<< (IsCaseSensitive.value() ? "true" : "false") << "',\n";
if (UseExternalNames)
OS << " 'use-external-names': '"
<< (UseExternalNames.getValue() ? "true" : "false") << "',\n";
<< (UseExternalNames.value() ? "true" : "false") << "',\n";
bool UseOverlayRelative = false;
if (IsOverlayRelative) {
UseOverlayRelative = IsOverlayRelative.getValue();
UseOverlayRelative = IsOverlayRelative.value();
OS << " 'overlay-relative': '" << (UseOverlayRelative ? "true" : "false")
<< "',\n";
}


@ -429,7 +429,7 @@ raw_ostream &raw_ostream::operator<<(const FormattedBytes &FB) {
indent(FB.IndentLevel);
if (FB.FirstByteOffset) {
uint64_t Offset = FB.FirstByteOffset.getValue();
uint64_t Offset = FB.FirstByteOffset.value();
llvm::write_hex(*this, Offset + LineIndex, HPS, OffsetWidth);
*this << ": ";
}


@ -2601,7 +2601,7 @@ StringRef Record::getValueAsString(StringRef FieldName) const {
if (!S)
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
return S.getValue();
return S.value();
}
llvm::Optional<StringRef>


@ -1180,7 +1180,7 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
if (Arg) {
const int64_t Value = Arg.getValue().Value.getSExtValue();
const int64_t Value = Arg.value().Value.getSExtValue();
if (Value == 0) {
unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
@ -4240,7 +4240,7 @@ AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
},
[=](MachineInstrBuilder &MIB) { // vaddr
if (FI)
MIB.addFrameIndex(FI.getValue());
MIB.addFrameIndex(FI.value());
else
MIB.addReg(VAddr);
},


@ -131,7 +131,7 @@ public:
bool IsAOneAddressSpace = isOneAddressSpace(A);
bool IsBOneAddressSpace = isOneAddressSpace(B);
return AIO.getValue() >= BIO.getValue() &&
return AIO.value() >= BIO.value() &&
(IsAOneAddressSpace == IsBOneAddressSpace || !IsAOneAddressSpace);
}
};


@ -2329,13 +2329,13 @@ bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) {
continue;
if (const auto &MOI = MOA.getLoadInfo(MI))
Changed |= expandLoad(MOI.getValue(), MI);
Changed |= expandLoad(MOI.value(), MI);
else if (const auto &MOI = MOA.getStoreInfo(MI))
Changed |= expandStore(MOI.getValue(), MI);
Changed |= expandStore(MOI.value(), MI);
else if (const auto &MOI = MOA.getAtomicFenceInfo(MI))
Changed |= expandAtomicFence(MOI.getValue(), MI);
Changed |= expandAtomicFence(MOI.value(), MI);
else if (const auto &MOI = MOA.getAtomicCmpxchgOrRmwInfo(MI))
Changed |= expandAtomicCmpxchgOrRmw(MOI.getValue(), MI);
Changed |= expandAtomicCmpxchgOrRmw(MOI.value(), MI);
}
}


@ -351,13 +351,13 @@ Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
if (!Op0 || !Op1)
return Optional<int64_t>{};
if (I->getOpcode() == Instruction::Add)
return Optional<int64_t>{Op0.getValue() + Op1.getValue()};
return Optional<int64_t>{Op0.value() + Op1.value()};
if (I->getOpcode() == Instruction::Mul)
return Optional<int64_t>{Op0.getValue() * Op1.getValue()};
return Optional<int64_t>{Op0.value() * Op1.value()};
if (I->getOpcode() == Instruction::Shl)
return Optional<int64_t>{Op0.getValue() << Op1.getValue()};
return Optional<int64_t>{Op0.value() << Op1.value()};
if (I->getOpcode() == Instruction::Or)
return Optional<int64_t>{Op0.getValue() | Op1.getValue()};
return Optional<int64_t>{Op0.value() | Op1.value()};
}
return Optional<int64_t>{};
}


@ -1024,7 +1024,7 @@ void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
for (auto &B : MF) {
auto At = findCFILocation(B);
if (At)
insertCFIInstructionsAt(B, At.getValue());
insertCFIInstructionsAt(B, At.value());
}
}


@ -705,14 +705,14 @@ LanaiAsmParser::parseRegister(bool RestoreOnFailure) {
RegNum = MatchRegisterName(Lexer.getTok().getIdentifier());
if (RegNum == 0) {
if (PercentTok && RestoreOnFailure)
Lexer.UnLex(PercentTok.getValue());
Lexer.UnLex(PercentTok.value());
return nullptr;
}
Parser.Lex(); // Eat identifier token
return LanaiOperand::createReg(RegNum, Start, End);
}
if (PercentTok && RestoreOnFailure)
Lexer.UnLex(PercentTok.getValue());
Lexer.UnLex(PercentTok.value());
return nullptr;
}


@ -1861,7 +1861,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InFlag = Ret.getValue(2);
if (ProxyRegTruncates[i]) {
Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].getValue(), Ret);
Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].value(), Ret);
}
InVals.push_back(Ret);


@ -41,7 +41,7 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
auto VVPOpcodeOpt = getVVPOpcode(Opcode);
if (!VVPOpcodeOpt)
return SDValue();
unsigned VVPOpcode = VVPOpcodeOpt.getValue();
unsigned VVPOpcode = VVPOpcodeOpt.value();
const bool FromVP = ISD::isVPOpcode(Opcode);
// The representative and legalized vector type of this operation.


@ -87,15 +87,14 @@ bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc,
if (Stack.empty()) {
return typeError(ErrorLoc,
EVT ? StringRef("empty stack while popping ") +
WebAssembly::typeToString(EVT.getValue())
WebAssembly::typeToString(EVT.value())
: StringRef("empty stack while popping value"));
}
auto PVT = Stack.pop_back_val();
if (EVT && EVT.getValue() != PVT) {
if (EVT && EVT.value() != PVT) {
return typeError(
ErrorLoc, StringRef("popped ") + WebAssembly::typeToString(PVT) +
", expected " +
WebAssembly::typeToString(EVT.getValue()));
", expected " + WebAssembly::typeToString(EVT.value()));
}
return false;
}


@ -553,7 +553,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) {
std::tie(SizeArg, NEltArg) = FnAttrs.getAllocSizeArgs();
SizeArg += 1;
if (NEltArg)
NEltArg = NEltArg.getValue() + 1;
NEltArg = NEltArg.value() + 1;
FnAttrs.addAllocSizeAttr(SizeArg, NEltArg);
}
// In case the callee has 'noreturn' attribute, We need to remove it, because


@ -718,8 +718,8 @@ Argument *IRPosition::getAssociatedArgument() const {
}
// If we found a unique callback candidate argument, return it.
if (CBCandidateArg && CBCandidateArg.getValue())
return CBCandidateArg.getValue();
if (CBCandidateArg && CBCandidateArg.value())
return CBCandidateArg.value();
// If no callbacks were found, or none used the underlying call site operand
// exclusively, use the direct callee argument if available.
@ -1048,11 +1048,11 @@ Attributor::getAssumedConstant(const IRPosition &IRP,
recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
return llvm::None;
}
if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
if (isa_and_nonnull<UndefValue>(SimplifiedV.value())) {
recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
return UndefValue::get(IRP.getAssociatedType());
}
Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.getValue());
Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.value());
if (CI)
CI = dyn_cast_or_null<Constant>(
AA::getWithType(*CI, *IRP.getAssociatedType()));
@ -2697,8 +2697,8 @@ void InformationCache::initializeInformationCache(const Function &CF,
Optional<short> &NumUses = AssumeUsesMap[I];
if (!NumUses)
NumUses = I->getNumUses();
NumUses = NumUses.getValue() - /* this assume */ 1;
if (NumUses.getValue() != 0)
NumUses = NumUses.value() - /* this assume */ 1;
if (NumUses.value() != 0)
continue;
AssumeOnlyValues.insert(I);
for (const Value *Op : I->operands())


@ -437,7 +437,7 @@ static bool genericValueTraversal(
A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
if (!SimpleV)
continue;
Value *NewV = SimpleV.getValue();
Value *NewV = SimpleV.value();
if (NewV && NewV != V) {
if ((VS & AA::Interprocedural) || !CtxI ||
AA::isValidInScope(*NewV, CtxI->getFunction())) {
@ -1891,14 +1891,14 @@ ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
// Check if we have an assumed unique return value that we could manifest.
Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
if (!UniqueRV || !UniqueRV.getValue())
if (!UniqueRV || !UniqueRV.value())
return Changed;
// Bookkeeping.
STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
"Number of function with unique return");
// If the assumed unique return value is an argument, annotate it.
if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) {
if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
getAssociatedFunction()->getReturnType())) {
getIRPosition() = IRPosition::argument(*UniqueRVArg);
@ -2666,9 +2666,9 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
// Either we stopped and the appropriate action was taken,
// or we got back a simplified value to continue.
Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
if (!SimplifiedPtrOp || !SimplifiedPtrOp.getValue())
if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
return true;
const Value *PtrOpVal = SimplifiedPtrOp.getValue();
const Value *PtrOpVal = SimplifiedPtrOp.value();
// A memory access through a pointer is considered UB
// only if the pointer has constant null value.
@ -2757,14 +2757,14 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
if (UsedAssumedInformation)
continue;
if (SimplifiedVal && !SimplifiedVal.getValue())
if (SimplifiedVal && !SimplifiedVal.value())
return true;
if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.getValue())) {
if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
KnownUBInsts.insert(&I);
continue;
}
if (!ArgVal->getType()->isPointerTy() ||
!isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
!isa<ConstantPointerNull>(*SimplifiedVal.value()))
continue;
auto &NonNullAA =
A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
@ -4101,11 +4101,11 @@ identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
bool UsedAssumedInformation = false;
Optional<Constant *> C =
A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
if (!C || isa_and_nonnull<UndefValue>(C.getValue())) {
if (!C || isa_and_nonnull<UndefValue>(C.value())) {
// No value yet, assume all edges are dead.
} else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
} else if (isa_and_nonnull<ConstantInt>(C.value())) {
for (auto &CaseIt : SI.cases()) {
if (CaseIt.getCaseValue() == C.getValue()) {
if (CaseIt.getCaseValue() == C.value()) {
AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
return UsedAssumedInformation;
}
@ -5523,8 +5523,8 @@ struct AAValueSimplifyImpl : AAValueSimplify {
if (!SimpleV)
return PoisonValue::get(&Ty);
Value *EffectiveV = &V;
if (SimpleV.getValue())
EffectiveV = SimpleV.getValue();
if (SimpleV.value())
EffectiveV = SimpleV.value();
if (auto *C = dyn_cast<Constant>(EffectiveV))
return C;
if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
@ -5540,7 +5540,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
/// nullptr if we don't have one that makes sense.
Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
Value *NewV = SimplifiedAssociatedValue
? SimplifiedAssociatedValue.getValue()
? SimplifiedAssociatedValue.value()
: UndefValue::get(getAssociatedType());
if (NewV && NewV != &getAssociatedValue()) {
ValueToValueMapTy VMap;
@ -5671,7 +5671,7 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
if (!SimpleArgOp)
return true;
if (!SimpleArgOp.getValue())
if (!SimpleArgOp.value())
return false;
if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
return false;
@ -5786,7 +5786,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
*this, UsedAssumedInformation);
if (!SimplifiedLHS)
return true;
if (!SimplifiedLHS.getValue())
if (!SimplifiedLHS.value())
return false;
LHS = *SimplifiedLHS;
@ -5795,7 +5795,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
*this, UsedAssumedInformation);
if (!SimplifiedRHS)
return true;
if (!SimplifiedRHS.getValue())
if (!SimplifiedRHS.value())
return false;
RHS = *SimplifiedRHS;
@ -5867,8 +5867,8 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
if (!SimplifiedOp)
return true;
if (SimplifiedOp.getValue())
NewOps[Idx] = SimplifiedOp.getValue();
if (SimplifiedOp.value())
NewOps[Idx] = SimplifiedOp.value();
else
NewOps[Idx] = Op;
@ -6294,10 +6294,10 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
Alignment = std::max(Alignment, *RetAlign);
if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
assert(AlignmentAPI && AlignmentAPI.getValue().getZExtValue() > 0 &&
assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 &&
"Expected an alignment during manifest!");
Alignment = std::max(
Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue()));
Alignment, assumeAligned(AlignmentAPI.value().getZExtValue()));
}
// TODO: Hoist the alloca towards the function entry.
@ -6346,7 +6346,7 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
A.getAssumedConstant(V, AA, UsedAssumedInformation);
if (!SimpleV)
return APInt(64, 0);
if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value()))
return CI->getValue();
return llvm::None;
}
@ -6637,7 +6637,7 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
Optional<APInt> Size = getSize(A, *this, AI);
if (MaxHeapToStackSize != -1) {
if (!Size || Size.getValue().ugt(MaxHeapToStackSize)) {
if (!Size || Size.value().ugt(MaxHeapToStackSize)) {
LLVM_DEBUG({
if (!Size)
dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
@ -6759,8 +6759,8 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
LLVM_DEBUG({
dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
if (CSTy && CSTy.getValue())
CSTy.getValue()->print(dbgs());
if (CSTy && CSTy.value())
CSTy.value()->print(dbgs());
else if (CSTy)
dbgs() << "<nullptr>";
else
@ -6771,8 +6771,8 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
LLVM_DEBUG({
dbgs() << " : New Type: ";
if (Ty && Ty.getValue())
Ty.getValue()->print(dbgs());
if (Ty && Ty.value())
Ty.value()->print(dbgs());
else if (Ty)
dbgs() << "<nullptr>";
else
@ -6780,7 +6780,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
dbgs() << "\n";
});
return !Ty || Ty.getValue();
return !Ty || Ty.value();
};
if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
@ -6794,7 +6794,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
PrivatizableType = identifyPrivatizableType(A);
if (!PrivatizableType)
return ChangeStatus::UNCHANGED;
if (!PrivatizableType.getValue())
if (!PrivatizableType.value())
return indicatePessimisticFixpoint();
// The dependence is optional so we don't give up once we give up on the
@ -6882,7 +6882,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
if (!CBArgPrivTy)
continue;
if (CBArgPrivTy.getValue() == PrivatizableType)
if (CBArgPrivTy.value() == PrivatizableType)
continue;
}
@ -6929,7 +6929,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
if (!DCArgPrivTy)
return true;
if (DCArgPrivTy.getValue() == PrivatizableType)
if (DCArgPrivTy.value() == PrivatizableType)
return true;
}
}
@ -7071,7 +7071,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
ChangeStatus manifest(Attributor &A) override {
if (!PrivatizableType)
return ChangeStatus::UNCHANGED;
assert(PrivatizableType.getValue() && "Expected privatizable type!");
assert(PrivatizableType.value() && "Expected privatizable type!");
// Collect all tail calls in the function as we cannot allow new allocas to
// escape into tail recursion.
@ -7104,9 +7104,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
Instruction *IP = &*EntryBB.getFirstInsertionPt();
const DataLayout &DL = IP->getModule()->getDataLayout();
unsigned AS = DL.getAllocaAddrSpace();
Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
Instruction *AI = new AllocaInst(PrivatizableType.value(), AS,
Arg->getName() + ".priv", IP);
createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
createInitialization(PrivatizableType.value(), *AI, ReplacementFn,
ArgIt->getArgNo(), *IP);
if (AI->getType() != Arg->getType())
@ -7214,7 +7214,7 @@ struct AAPrivatizablePtrCallSiteArgument final
PrivatizableType = identifyPrivatizableType(A);
if (!PrivatizableType)
return ChangeStatus::UNCHANGED;
if (!PrivatizableType.getValue())
if (!PrivatizableType.value())
return indicatePessimisticFixpoint();
const IRPosition &IRP = getIRPosition();
@ -8675,7 +8675,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
*this, UsedAssumedInformation);
if (!SimplifiedLHS)
return true;
if (!SimplifiedLHS.getValue())
if (!SimplifiedLHS.value())
return false;
LHS = *SimplifiedLHS;
@ -8684,7 +8684,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
*this, UsedAssumedInformation);
if (!SimplifiedRHS)
return true;
if (!SimplifiedRHS.getValue())
if (!SimplifiedRHS.value())
return false;
RHS = *SimplifiedRHS;
@ -8728,7 +8728,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
*this, UsedAssumedInformation);
if (!SimplifiedOpV)
return true;
if (!SimplifiedOpV.getValue())
if (!SimplifiedOpV.value())
return false;
OpV = *SimplifiedOpV;
@ -8758,7 +8758,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
*this, UsedAssumedInformation);
if (!SimplifiedLHS)
return true;
if (!SimplifiedLHS.getValue())
if (!SimplifiedLHS.value())
return false;
LHS = *SimplifiedLHS;
@ -8767,7 +8767,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
*this, UsedAssumedInformation);
if (!SimplifiedRHS)
return true;
if (!SimplifiedRHS.getValue())
if (!SimplifiedRHS.value())
return false;
RHS = *SimplifiedRHS;
@ -8832,7 +8832,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
*this, UsedAssumedInformation);
if (!SimplifiedOpV)
return true;
if (!SimplifiedOpV.getValue())
if (!SimplifiedOpV.value())
return false;
Value *VPtr = *SimplifiedOpV;
@ -9193,7 +9193,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedLHS)
return ChangeStatus::UNCHANGED;
if (!SimplifiedLHS.getValue())
if (!SimplifiedLHS.value())
return indicatePessimisticFixpoint();
LHS = *SimplifiedLHS;
@ -9202,7 +9202,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedRHS)
return ChangeStatus::UNCHANGED;
if (!SimplifiedRHS.getValue())
if (!SimplifiedRHS.value())
return indicatePessimisticFixpoint();
RHS = *SimplifiedRHS;
@ -9276,7 +9276,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedLHS)
return ChangeStatus::UNCHANGED;
if (!SimplifiedLHS.getValue())
if (!SimplifiedLHS.value())
return indicatePessimisticFixpoint();
LHS = *SimplifiedLHS;
@ -9285,7 +9285,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedRHS)
return ChangeStatus::UNCHANGED;
if (!SimplifiedRHS.getValue())
if (!SimplifiedRHS.value())
return indicatePessimisticFixpoint();
RHS = *SimplifiedRHS;
@ -9351,7 +9351,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedSrc)
return ChangeStatus::UNCHANGED;
if (!SimplifiedSrc.getValue())
if (!SimplifiedSrc.value())
return indicatePessimisticFixpoint();
Src = *SimplifiedSrc;
@ -9384,7 +9384,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedLHS)
return ChangeStatus::UNCHANGED;
if (!SimplifiedLHS.getValue())
if (!SimplifiedLHS.value())
return indicatePessimisticFixpoint();
LHS = *SimplifiedLHS;
@ -9393,7 +9393,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
*this, UsedAssumedInformation);
if (!SimplifiedRHS)
return ChangeStatus::UNCHANGED;
if (!SimplifiedRHS.getValue())
if (!SimplifiedRHS.value())
return indicatePessimisticFixpoint();
RHS = *SimplifiedRHS;
@ -9452,7 +9452,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
UsedAssumedInformation);
if (!SimplifiedIncomingValue)
continue;
if (!SimplifiedIncomingValue.getValue())
if (!SimplifiedIncomingValue.value())
return indicatePessimisticFixpoint();
IncomingValue = *SimplifiedIncomingValue;
@ -9941,7 +9941,7 @@ private:
const Function &Fn) {
Optional<bool> Cached = isCachedReachable(Fn);
if (Cached)
return Cached.getValue();
return Cached.value();
// The query was not cached, thus it is new. We need to request an update
// explicitly to make sure this the information is properly run to a


@ -555,7 +555,7 @@ collectRegionsConstants(OutlinableRegion &Region,
for (Value *V : ID.OperVals) {
Optional<unsigned> GVNOpt = C.getGVN(V);
assert(GVNOpt && "Expected a GVN for operand?");
unsigned GVN = GVNOpt.getValue();
unsigned GVN = GVNOpt.value();
// Check if this global value has been found to not be the same already.
if (NotSame.contains(GVN)) {
@ -570,7 +570,7 @@ collectRegionsConstants(OutlinableRegion &Region,
// it is considered to not be the same value.
Optional<bool> ConstantMatches = constantMatches(V, GVN, GVNToConstant);
if (ConstantMatches) {
if (ConstantMatches.getValue())
if (ConstantMatches.value())
continue;
else
ConstantsTheSame = false;
@ -651,7 +651,7 @@ Function *IROutliner::createFunction(Module &M, OutlinableGroup &Group,
// Transfer the swifterr attribute to the correct function parameter.
if (Group.SwiftErrorArgument)
Group.OutlinedFunction->addParamAttr(Group.SwiftErrorArgument.getValue(),
Group.OutlinedFunction->addParamAttr(Group.SwiftErrorArgument.value(),
Attribute::SwiftError);
Group.OutlinedFunction->addFnAttr(Attribute::OptimizeForSize);
@ -809,7 +809,7 @@ static void mapInputsToGVNs(IRSimilarityCandidate &C,
if (OutputMappings.find(Input) != OutputMappings.end())
Input = OutputMappings.find(Input)->second;
assert(C.getGVN(Input) && "Could not find a numbering for the given input");
EndInputNumbers.push_back(C.getGVN(Input).getValue());
EndInputNumbers.push_back(C.getGVN(Input).value());
}
}
@ -948,11 +948,11 @@ findExtractedInputToOverallInputMapping(OutlinableRegion &Region,
for (unsigned InputVal : InputGVNs) {
Optional<unsigned> CanonicalNumberOpt = C.getCanonicalNum(InputVal);
assert(CanonicalNumberOpt && "Canonical number not found?");
unsigned CanonicalNumber = CanonicalNumberOpt.getValue();
unsigned CanonicalNumber = CanonicalNumberOpt.value();
Optional<Value *> InputOpt = C.fromGVN(InputVal);
assert(InputOpt && "Global value number not found?");
Value *Input = InputOpt.getValue();
Value *Input = InputOpt.value();
DenseMap<unsigned, unsigned>::iterator AggArgIt =
Group.CanonicalNumberToAggArg.find(CanonicalNumber);
@ -1236,13 +1236,13 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
Optional<unsigned> BBGVN = Cand.getGVN(PHIBB);
assert(BBGVN && "Could not find GVN for the incoming block!");
BBGVN = Cand.getCanonicalNum(BBGVN.getValue());
BBGVN = Cand.getCanonicalNum(BBGVN.value());
assert(BBGVN && "Could not find canonical number for the incoming block!");
// Create a pair of the exit block canonical value, and the aggregate
// argument location, connected to the canonical numbers stored in the
// PHINode.
PHINodeData TemporaryPair =
std::make_pair(std::make_pair(BBGVN.getValue(), AggArgIdx), PHIGVNs);
std::make_pair(std::make_pair(BBGVN.value(), AggArgIdx), PHIGVNs);
hash_code PHINodeDataHash = encodePHINodeData(TemporaryPair);
// Look for and create a new entry in our connection between canonical
@ -1516,8 +1516,7 @@ CallInst *replaceCalledFunction(Module &M, OutlinableRegion &Region) {
// Make sure that the argument in the new function has the SwiftError
// argument.
if (Group.SwiftErrorArgument)
Call->addParamAttr(Group.SwiftErrorArgument.getValue(),
Attribute::SwiftError);
Call->addParamAttr(Group.SwiftErrorArgument.value(), Attribute::SwiftError);
return Call;
}
@ -2082,9 +2081,9 @@ static void alignOutputBlockWithAggFunc(
if (MatchingBB) {
LLVM_DEBUG(dbgs() << "Set output block for region in function"
<< Region.ExtractedFunction << " to "
<< MatchingBB.getValue());
<< MatchingBB.value());
Region.OutputBlockNum = MatchingBB.getValue();
Region.OutputBlockNum = MatchingBB.value();
for (std::pair<Value *, BasicBlock *> &VtoBB : OutputBBs)
VtoBB.second->eraseFromParent();
return;
@ -2679,15 +2678,14 @@ void IROutliner::updateOutputMapping(OutlinableRegion &Region,
if (!OutputIdx)
return;
if (OutputMappings.find(Outputs[OutputIdx.getValue()]) ==
OutputMappings.end()) {
if (OutputMappings.find(Outputs[OutputIdx.value()]) == OutputMappings.end()) {
LLVM_DEBUG(dbgs() << "Mapping extracted output " << *LI << " to "
<< *Outputs[OutputIdx.getValue()] << "\n");
OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.getValue()]));
<< *Outputs[OutputIdx.value()] << "\n");
OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.value()]));
} else {
Value *Orig = OutputMappings.find(Outputs[OutputIdx.getValue()])->second;
Value *Orig = OutputMappings.find(Outputs[OutputIdx.value()])->second;
LLVM_DEBUG(dbgs() << "Mapping extracted output " << *Orig << " to "
<< *Outputs[OutputIdx.getValue()] << "\n");
<< *Outputs[OutputIdx.value()] << "\n");
OutputMappings.insert(std::make_pair(LI, Orig));
}
}


@ -4431,10 +4431,10 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
if (!SimplifiedValue)
return Str + std::string("none");
if (!SimplifiedValue.getValue())
if (!SimplifiedValue.value())
return Str + std::string("nullptr");
if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue()))
if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.value()))
return Str + std::to_string(CI->getSExtValue());
return Str + std::string("unknown");
@ -4459,7 +4459,7 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
[&](const IRPosition &IRP, const AbstractAttribute *AA,
bool &UsedAssumedInformation) -> Optional<Value *> {
assert((isValidState() ||
(SimplifiedValue && SimplifiedValue.getValue() == nullptr)) &&
(SimplifiedValue && SimplifiedValue.value() == nullptr)) &&
"Unexpected invalid state!");
if (!isAtFixpoint()) {


@ -130,7 +130,7 @@ void ContextTrieNode::addFunctionSize(uint32_t FSize) {
if (!FuncSize)
FuncSize = 0;
FuncSize = FuncSize.getValue() + FSize;
FuncSize = FuncSize.value() + FSize;
}
LineLocation ContextTrieNode::getCallSiteLoc() const { return CallSiteLoc; }


@ -1350,14 +1350,14 @@ SampleProfileLoader::getExternalInlineAdvisorCost(CallBase &CB) {
bool SampleProfileLoader::getExternalInlineAdvisorShouldInline(CallBase &CB) {
Optional<InlineCost> Cost = getExternalInlineAdvisorCost(CB);
return Cost ? !!Cost.getValue() : false;
return Cost ? !!Cost.value() : false;
}
InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
if (Optional<InlineCost> ReplayCost =
getExternalInlineAdvisorCost(*Candidate.CallInstr))
return ReplayCost.getValue();
return ReplayCost.value();
// Adjust threshold based on call site hotness, only do this for callsite
// prioritized inliner because otherwise cost-benefit check is done earlier.
int SampleThreshold = SampleColdCallSiteThreshold;


@ -2682,7 +2682,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Handle target specific intrinsics
Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
if (V)
return V.getValue();
return V.value();
break;
}
}


@ -925,7 +925,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Optional<Value *> V = targetSimplifyDemandedUseBitsIntrinsic(
*II, DemandedMask, Known, KnownBitsComputed);
if (V)
return V.getValue();
return V.value();
break;
}
}
@ -1636,7 +1636,7 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
*II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
simplifyAndSetOp);
if (V)
return V.getValue();
return V.value();
break;
}
} // switch on IntrinsicID


@ -486,7 +486,7 @@ static bool isTsanAtomic(const Instruction *I) {
if (!SSID)
return false;
if (isa<LoadInst>(I) || isa<StoreInst>(I))
return SSID.getValue() != SyncScope::SingleThread;
return SSID.value() != SyncScope::SingleThread;
return true;
}


@ -611,9 +611,9 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
ConstCand->ConstInt->getValue());
if (Diff) {
const InstructionCost ImmCosts =
TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.getValue(), Ty);
TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.value(), Ty);
Cost -= ImmCosts;
LLVM_DEBUG(dbgs() << "Offset " << Diff.getValue() << " "
LLVM_DEBUG(dbgs() << "Offset " << Diff.value() << " "
<< "has penalty: " << ImmCosts << "\n"
<< "Adjusted cost: " << Cost << "\n");
}


@ -748,14 +748,14 @@ void GVNPass::printPipeline(
OS << "<";
if (Options.AllowPRE != None)
OS << (Options.AllowPRE.getValue() ? "" : "no-") << "pre;";
OS << (Options.AllowPRE.value() ? "" : "no-") << "pre;";
if (Options.AllowLoadPRE != None)
OS << (Options.AllowLoadPRE.getValue() ? "" : "no-") << "load-pre;";
OS << (Options.AllowLoadPRE.value() ? "" : "no-") << "load-pre;";
if (Options.AllowLoadPRESplitBackedge != None)
OS << (Options.AllowLoadPRESplitBackedge.getValue() ? "" : "no-")
OS << (Options.AllowLoadPRESplitBackedge.value() ? "" : "no-")
<< "split-backedge-load-pre;";
if (Options.AllowMemDep != None)
OS << (Options.AllowMemDep.getValue() ? "" : "no-") << "memdep";
OS << (Options.AllowMemDep.value() ? "" : "no-") << "memdep";
OS << ">";
}


@ -1710,7 +1710,7 @@ IntersectSignedRange(ScalarEvolution &SE,
return None;
if (!R1)
return R2;
auto &R1Value = R1.getValue();
auto &R1Value = R1.value();
// We never return empty ranges from this function, and R1 is supposed to be
// a result of intersection. Thus, R1 is never empty.
assert(!R1Value.isEmpty(SE, /* IsSigned */ true) &&
@ -1739,7 +1739,7 @@ IntersectUnsignedRange(ScalarEvolution &SE,
return None;
if (!R1)
return R2;
auto &R1Value = R1.getValue();
auto &R1Value = R1.value();
// We never return empty ranges from this function, and R1 is supposed to be
// a result of intersection. Thus, R1 is never empty.
assert(!R1Value.isEmpty(SE, /* IsSigned */ false) &&
@ -1950,13 +1950,12 @@ bool InductiveRangeCheckElimination::run(
LS.IsSignedPredicate);
if (Result) {
auto MaybeSafeIterRange =
IntersectRange(SE, SafeIterRange, Result.getValue());
IntersectRange(SE, SafeIterRange, Result.value());
if (MaybeSafeIterRange) {
assert(
!MaybeSafeIterRange.getValue().isEmpty(SE, LS.IsSignedPredicate) &&
assert(!MaybeSafeIterRange.value().isEmpty(SE, LS.IsSignedPredicate) &&
"We should never return empty ranges!");
RangeChecksToEliminate.push_back(IRC);
SafeIterRange = MaybeSafeIterRange.getValue();
SafeIterRange = MaybeSafeIterRange.value();
}
}
}
@ -1964,8 +1963,7 @@ bool InductiveRangeCheckElimination::run(
if (!SafeIterRange)
return false;
LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT,
SafeIterRange.getValue());
LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT, SafeIterRange.value());
bool Changed = LC.run();
if (Changed) {


@ -602,7 +602,7 @@ private:
: LLVMLoopDistributeFollowupCoincident});
if (PartitionID) {
Loop *NewLoop = Part->getDistributedLoop();
NewLoop->setLoopID(PartitionID.getValue());
NewLoop->setLoopID(PartitionID.value());
}
}
};
@ -826,7 +826,7 @@ public:
{LLVMLoopDistributeFollowupAll,
LLVMLoopDistributeFollowupFallback},
"llvm.loop.distribute.", true)
.getValue();
.value();
LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID);
}


@ -1483,7 +1483,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// anything where the alignment isn't at least the element size.
assert((StoreAlign && LoadAlign) &&
"Expect unordered load/store to have align.");
if (StoreAlign.getValue() < StoreSize || LoadAlign.getValue() < StoreSize)
if (StoreAlign.value() < StoreSize || LoadAlign.value() < StoreSize)
return Changed;
// If the element.atomic memcpy is not lowered into explicit
@ -1497,7 +1497,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// Note that unordered atomic loads/stores are *required* by the spec to
// have an alignment but non-atomic loads/stores may not.
NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(),
StoreBasePtr, StoreAlign.value(), LoadBasePtr, LoadAlign.value(),
NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope,
AATags.NoAlias);
}


@ -6385,8 +6385,8 @@ static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE,
// less DWARF ops than an iteration count-based expression.
if (Optional<APInt> Offset =
SE.computeConstantDifference(DVIRec.SCEVs[i], SCEVInductionVar)) {
if (Offset.getValue().getMinSignedBits() <= 64)
SalvageExpr->createOffsetExpr(Offset.getValue().getSExtValue(),
if (Offset.value().getMinSignedBits() <= 64)
SalvageExpr->createOffsetExpr(Offset.value().getSExtValue(),
LSRInductionVar);
} else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr,
SE))


@ -373,7 +373,7 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
LLVMLoopUnrollAndJamFollowupRemainderInner});
if (NewInnerEpilogueLoopID)
SubLoop->setLoopID(NewInnerEpilogueLoopID.getValue());
SubLoop->setLoopID(NewInnerEpilogueLoopID.value());
// Find trip count and trip multiple
BasicBlock *Latch = L->getLoopLatch();
@ -403,14 +403,14 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
LLVMLoopUnrollAndJamFollowupRemainderOuter});
if (NewOuterEpilogueLoopID)
EpilogueOuterLoop->setLoopID(NewOuterEpilogueLoopID.getValue());
EpilogueOuterLoop->setLoopID(NewOuterEpilogueLoopID.value());
}
Optional<MDNode *> NewInnerLoopID =
makeFollowupLoopID(OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
LLVMLoopUnrollAndJamFollowupInner});
if (NewInnerLoopID)
SubLoop->setLoopID(NewInnerLoopID.getValue());
SubLoop->setLoopID(NewInnerLoopID.value());
else
SubLoop->setLoopID(OrigSubLoopID);
@ -419,7 +419,7 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
OrigOuterLoopID,
{LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupOuter});
if (NewOuterLoopID) {
L->setLoopID(NewOuterLoopID.getValue());
L->setLoopID(NewOuterLoopID.value());
// Do not setLoopAlreadyUnrolled if a followup was given.
return UnrollResult;


@ -1324,7 +1324,7 @@ static LoopUnrollResult tryToUnrollLoop(
makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll,
LLVMLoopUnrollFollowupRemainder});
if (RemainderLoopID)
RemainderLoop->setLoopID(RemainderLoopID.getValue());
RemainderLoop->setLoopID(RemainderLoopID.value());
}
if (UnrollResult != LoopUnrollResult::FullyUnrolled) {
@ -1332,7 +1332,7 @@ static LoopUnrollResult tryToUnrollLoop(
makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll,
LLVMLoopUnrollFollowupUnrolled});
if (NewLoopID) {
L->setLoopID(NewLoopID.getValue());
L->setLoopID(NewLoopID.value());
// Do not setLoopAlreadyUnrolled if loop attributes have been specified
// explicitly.
@ -1645,15 +1645,15 @@ void LoopUnrollPass::printPipeline(
OS, MapClassName2PassName);
OS << "<";
if (UnrollOpts.AllowPartial != None)
OS << (UnrollOpts.AllowPartial.getValue() ? "" : "no-") << "partial;";
OS << (UnrollOpts.AllowPartial.value() ? "" : "no-") << "partial;";
if (UnrollOpts.AllowPeeling != None)
OS << (UnrollOpts.AllowPeeling.getValue() ? "" : "no-") << "peeling;";
OS << (UnrollOpts.AllowPeeling.value() ? "" : "no-") << "peeling;";
if (UnrollOpts.AllowRuntime != None)
OS << (UnrollOpts.AllowRuntime.getValue() ? "" : "no-") << "runtime;";
OS << (UnrollOpts.AllowRuntime.value() ? "" : "no-") << "runtime;";
if (UnrollOpts.AllowUpperBound != None)
OS << (UnrollOpts.AllowUpperBound.getValue() ? "" : "no-") << "upperbound;";
OS << (UnrollOpts.AllowUpperBound.value() ? "" : "no-") << "upperbound;";
if (UnrollOpts.AllowProfileBasedPeeling != None)
OS << (UnrollOpts.AllowProfileBasedPeeling.getValue() ? "" : "no-")
OS << (UnrollOpts.AllowProfileBasedPeeling.value() ? "" : "no-")
<< "profile-peeling;";
if (UnrollOpts.FullUnrollMaxCount != None)
OS << "full-unroll-max=" << UnrollOpts.FullUnrollMaxCount << ";";


@ -1778,7 +1778,7 @@ CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
auto Count = BFI->getProfileCountFromFreq(EntryFreq.getFrequency());
if (Count)
newFunction->setEntryCount(
ProfileCount(Count.getValue(), Function::PCT_Real)); // FIXME
ProfileCount(Count.value(), Function::PCT_Real)); // FIXME
BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
}


@ -402,7 +402,7 @@ CloneLoopBlocks(Loop *L, Value *NewIter, const bool UseEpilogRemainder,
Optional<MDNode *> NewLoopID = makeFollowupLoopID(
LoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupRemainder});
if (NewLoopID) {
NewLoop->setLoopID(NewLoopID.getValue());
NewLoop->setLoopID(NewLoopID.value());
// Do not setLoopAlreadyUnrolled if loop attributes have been defined
// explicitly.


@ -356,7 +356,7 @@ TransformationMode llvm::hasUnrollTransformation(const Loop *L) {
Optional<int> Count =
getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
if (Count)
return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
return Count.value() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable"))
return TM_ForcedByUser;
@ -377,7 +377,7 @@ TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) {
Optional<int> Count =
getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count");
if (Count)
return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
return Count.value() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable"))
return TM_ForcedByUser;


@ -221,7 +221,7 @@ void checkBackendInstrumentation(Instruction &I,
auto ExpectedWeightsOpt = extractWeights(&I, I.getContext());
if (!ExpectedWeightsOpt)
return;
auto ExpectedWeights = ExpectedWeightsOpt.getValue();
auto ExpectedWeights = ExpectedWeightsOpt.value();
verifyMisExpect(I, RealWeights, ExpectedWeights);
}
@ -230,7 +230,7 @@ void checkFrontendInstrumentation(Instruction &I,
auto RealWeightsOpt = extractWeights(&I, I.getContext());
if (!RealWeightsOpt)
return;
auto RealWeights = RealWeightsOpt.getValue();
auto RealWeights = RealWeightsOpt.value();
verifyMisExpect(I, RealWeights, ExpectedWeights);
}


@ -255,7 +255,7 @@ void VFABI::setVectorVariantNames(CallInst *CI,
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << VariantMapping << "'\n");
Optional<VFInfo> VI = VFABI::tryDemangleForVFABI(VariantMapping, *M);
assert(VI && "Cannot add an invalid VFABI name.");
assert(M->getNamedValue(VI.getValue().VectorName) &&
assert(M->getNamedValue(VI.value().VectorName) &&
"Cannot add variant to attribute: "
"vector function declaration is missing.");
}


@ -4866,7 +4866,7 @@ LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
MaxVScale =
TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
MaxScalableVF = ElementCount::getScalable(
MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
MaxVScale ? (MaxSafeElements / MaxVScale.value()) : 0);
if (!MaxScalableVF)
reportVectorizationInfo(
"Max legal vector width too small, scalable vectorization "
@ -5261,9 +5261,9 @@ bool LoopVectorizationCostModel::isMoreProfitable(
unsigned EstimatedWidthB = B.Width.getKnownMinValue();
if (Optional<unsigned> VScale = getVScaleForTuning()) {
if (A.Width.isScalable())
EstimatedWidthA *= VScale.getValue();
EstimatedWidthA *= VScale.value();
if (B.Width.isScalable())
EstimatedWidthB *= VScale.getValue();
EstimatedWidthB *= VScale.value();
}
// Assume vscale may be larger than 1 (or the value being tuned for),
@ -7625,7 +7625,7 @@ void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
BestVPlan.getVectorLoopRegion()->getEntryBasicBlock();
Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]);
if (VectorizedLoopID)
L->setLoopID(VectorizedLoopID.getValue());
L->setLoopID(VectorizedLoopID.value());
else {
// Keep all loop hints from the original loop on the vector loop (we'll
// replace the vectorizer-specific hints below).
@ -10461,7 +10461,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
LLVMLoopVectorizeFollowupEpilogue});
if (RemainderLoopID) {
L->setLoopID(RemainderLoopID.getValue());
L->setLoopID(RemainderLoopID.value());
} else {
if (DisableRuntimeUnroll)
AddRuntimeUnrollDisableMetaData(L);


@ -2637,7 +2637,7 @@ private:
AliasCacheKey key = std::make_pair(Inst1, Inst2);
Optional<bool> &result = AliasCache[key];
if (result) {
return result.getValue();
return result.value();
}
bool aliased = true;
if (Loc1.Ptr && isSimple(Inst1))


@ -579,7 +579,7 @@ static int compileModule(char **argv, LLVMContext &Context) {
Optional<CodeModel::Model> CM_IR = M->getCodeModel();
if (!CM && CM_IR)
Target->setCodeModel(CM_IR.getValue());
Target->setCodeModel(CM_IR.value());
} else {
TheTriple = Triple(Triple::normalize(TargetTriple));
if (TheTriple.getTriple().empty())


@ -535,9 +535,9 @@ int main(int argc, char **argv, char * const *envp) {
builder.setMCPU(codegen::getCPUStr());
builder.setMAttrs(codegen::getFeatureList());
if (auto RM = codegen::getExplicitRelocModel())
builder.setRelocationModel(RM.getValue());
builder.setRelocationModel(RM.value());
if (auto CM = codegen::getExplicitCodeModel())
builder.setCodeModel(CM.getValue());
builder.setCodeModel(CM.value());
builder.setErrorStr(&ErrorMsg);
builder.setEngineKind(ForceInterpreter
? EngineKind::Interpreter


@ -291,8 +291,8 @@ void CoverageExporterJson::renderRoot(ArrayRef<std::string> SourceFiles) {
const json::Object *ObjB = B.getAsObject();
assert(ObjA != nullptr && "Value A was not an Object");
assert(ObjB != nullptr && "Value B was not an Object");
const StringRef FilenameA = ObjA->getString("filename").getValue();
const StringRef FilenameB = ObjB->getString("filename").getValue();
const StringRef FilenameA = ObjA->getString("filename").value();
const StringRef FilenameB = ObjB->getString("filename").value();
return FilenameA.compare(FilenameB) < 0;
});
auto Export = json::Object(


@ -533,34 +533,33 @@ int main(int argc, char *argv[]) {
<< "Triple should be defined when output format is TBD";
return -1;
}
return writeTbdStub(llvm::Triple(Stub.Target.Triple.getValue()),
return writeTbdStub(llvm::Triple(Stub.Target.Triple.value()),
Stub.Symbols, "TBD", Out);
}
case FileFormat::IFS: {
Stub.IfsVersion = IfsVersionCurrent;
if (Config.InputFormat.getValue() == FileFormat::ELF &&
if (Config.InputFormat.value() == FileFormat::ELF &&
Config.HintIfsTarget) {
std::error_code HintEC(1, std::generic_category());
IFSTarget HintTarget = parseTriple(*Config.HintIfsTarget);
if (Stub.Target.Arch.getValue() != HintTarget.Arch.getValue())
if (Stub.Target.Arch.value() != HintTarget.Arch.value())
fatalError(make_error<StringError>(
"Triple hint does not match the actual architecture", HintEC));
if (Stub.Target.Endianness.getValue() !=
HintTarget.Endianness.getValue())
if (Stub.Target.Endianness.value() != HintTarget.Endianness.value())
fatalError(make_error<StringError>(
"Triple hint does not match the actual endianness", HintEC));
if (Stub.Target.BitWidth.getValue() != HintTarget.BitWidth.getValue())
if (Stub.Target.BitWidth.value() != HintTarget.BitWidth.value())
fatalError(make_error<StringError>(
"Triple hint does not match the actual bit width", HintEC));
stripIFSTarget(Stub, true, false, false, false);
Stub.Target.Triple = Config.HintIfsTarget.getValue();
Stub.Target.Triple = Config.HintIfsTarget.value();
} else {
stripIFSTarget(Stub, Config.StripIfsTarget, Config.StripIfsArch,
Config.StripIfsEndianness, Config.StripIfsBitwidth);
}
Error IFSWriteError =
writeIFS(Config.Output.getValue(), Stub, Config.WriteIfChanged);
writeIFS(Config.Output.value(), Stub, Config.WriteIfChanged);
if (IFSWriteError)
fatalError(std::move(IFSWriteError));
break;
@ -589,29 +588,28 @@ int main(int argc, char *argv[]) {
}
if (Config.OutputIfs) {
Stub.IfsVersion = IfsVersionCurrent;
if (Config.InputFormat.getValue() == FileFormat::ELF &&
if (Config.InputFormat.value() == FileFormat::ELF &&
Config.HintIfsTarget) {
std::error_code HintEC(1, std::generic_category());
IFSTarget HintTarget = parseTriple(*Config.HintIfsTarget);
if (Stub.Target.Arch.getValue() != HintTarget.Arch.getValue())
if (Stub.Target.Arch.value() != HintTarget.Arch.value())
fatalError(make_error<StringError>(
"Triple hint does not match the actual architecture", HintEC));
if (Stub.Target.Endianness.getValue() !=
HintTarget.Endianness.getValue())
if (Stub.Target.Endianness.value() != HintTarget.Endianness.value())
fatalError(make_error<StringError>(
"Triple hint does not match the actual endianness", HintEC));
if (Stub.Target.BitWidth.getValue() != HintTarget.BitWidth.getValue())
if (Stub.Target.BitWidth.value() != HintTarget.BitWidth.value())
fatalError(make_error<StringError>(
"Triple hint does not match the actual bit width", HintEC));
stripIFSTarget(Stub, true, false, false, false);
Stub.Target.Triple = Config.HintIfsTarget.getValue();
Stub.Target.Triple = Config.HintIfsTarget.value();
} else {
stripIFSTarget(Stub, Config.StripIfsTarget, Config.StripIfsArch,
Config.StripIfsEndianness, Config.StripIfsBitwidth);
}
Error IFSWriteError =
writeIFS(Config.OutputIfs.getValue(), Stub, Config.WriteIfChanged);
writeIFS(Config.OutputIfs.value(), Stub, Config.WriteIfChanged);
if (IFSWriteError)
fatalError(std::move(IFSWriteError));
}
@ -628,7 +626,7 @@ int main(int argc, char *argv[]) {
<< "Triple should be defined when output format is TBD";
return -1;
}
return writeTbdStub(llvm::Triple(Stub.Target.Triple.getValue()),
return writeTbdStub(llvm::Triple(Stub.Target.Triple.value()),
Stub.Symbols, "TBD", Out);
}
}


@ -71,7 +71,7 @@ void InstructionInfoView::printView(raw_ostream &OS) const {
TempStream << ' ';
if (IIVDEntry.RThroughput) {
double RT = IIVDEntry.RThroughput.getValue();
double RT = IIVDEntry.RThroughput.value();
TempStream << format("%.2f", RT) << ' ';
if (RT < 10.0)
TempStream << " ";


@ -94,8 +94,8 @@ std::string objdump::getXCOFFSymbolDescription(const SymbolInfoTy &SymbolInfo,
std::string Result;
// Dummy symbols have no symbol index.
if (SymbolInfo.XCOFFSymInfo.Index)
Result = ("(idx: " + Twine(SymbolInfo.XCOFFSymInfo.Index.getValue()) +
") " + SymbolName)
Result = ("(idx: " + Twine(SymbolInfo.XCOFFSymInfo.Index.value()) + ") " +
SymbolName)
.str();
else
Result.append(SymbolName.begin(), SymbolName.end());


@ -1412,7 +1412,7 @@ static void disassembleObject(const Target *TheTarget, const ObjectFile &Obj,
// separately. But WebAssembly decodes preludes for some symbols.
//
if (Status) {
if (Status.getValue() == MCDisassembler::Fail) {
if (Status.value() == MCDisassembler::Fail) {
outs() << "// Error in decoding " << SymbolName
<< " : Decoding failed region as bytes.\n";
for (uint64_t I = 0; I < Size; ++I) {
@ -2144,7 +2144,7 @@ void objdump::printSymbol(const ObjectFile &O, const SymbolRef &Symbol,
if (SymbolDescription)
SymName = getXCOFFSymbolDescription(
createSymbolInfo(O, SymRef.getValue()), SymName);
createSymbolInfo(O, SymRef.value()), SymName);
outs() << ' ' << SymName;
outs() << ") ";
@ -2251,8 +2251,8 @@ static void printRawClangAST(const ObjectFile *Obj) {
if (!ClangASTSection)
return;
StringRef ClangASTContents = unwrapOrError(
ClangASTSection.getValue().getContents(), Obj->getFileName());
StringRef ClangASTContents =
unwrapOrError(ClangASTSection.value().getContents(), Obj->getFileName());
outs().write(ClangASTContents.data(), ClangASTContents.size());
}


@ -97,7 +97,7 @@ BinarySizeContextTracker::getFuncSizeForContext(const ContextTrieNode *Node) {
PrevNode = CurrNode;
CurrNode = CurrNode->getChildContext(CallSiteLoc, Node->getFuncName());
if (CurrNode && CurrNode->getFunctionSize())
Size = CurrNode->getFunctionSize().getValue();
Size = CurrNode->getFunctionSize().value();
CallSiteLoc = Node->getCallSiteLoc();
Node = Node->getParentContext();
}
@ -111,12 +111,12 @@ BinarySizeContextTracker::getFuncSizeForContext(const ContextTrieNode *Node) {
while (!Size && CurrNode && !CurrNode->getAllChildContext().empty()) {
CurrNode = &CurrNode->getAllChildContext().begin()->second;
if (CurrNode->getFunctionSize())
Size = CurrNode->getFunctionSize().getValue();
Size = CurrNode->getFunctionSize().value();
}
}
assert(Size && "We should at least find one context size.");
return Size.getValue();
return Size.value();
}
void BinarySizeContextTracker::trackInlineesOptimizedAway(


@ -90,8 +90,8 @@ exportToFile(const StringRef FilePath,
assert(End && "Could not find instruction number for last instruction");
J.object([&] {
J.attribute("start", Start.getValue());
J.attribute("end", End.getValue());
J.attribute("start", Start.value());
J.attribute("end", End.value());
});
}
J.arrayEnd();


@ -247,15 +247,15 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
auto FormValue = DIEWrapper.find(AttrSpec.Attr);
if (!FormValue)
return;
auto Form = FormValue.getValue().getForm();
auto Form = FormValue.value().getForm();
bool indirect = false;
do {
indirect = false;
switch (Form) {
case dwarf::DW_FORM_addr:
case dwarf::DW_FORM_GNU_addr_index:
if (auto Val = FormValue.getValue().getAsAddress())
NewValue.Value = Val.getValue();
if (auto Val = FormValue.value().getAsAddress())
NewValue.Value = Val.value();
break;
case dwarf::DW_FORM_ref_addr:
case dwarf::DW_FORM_ref1:
@ -264,16 +264,16 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
case dwarf::DW_FORM_ref8:
case dwarf::DW_FORM_ref_udata:
case dwarf::DW_FORM_ref_sig8:
if (auto Val = FormValue.getValue().getAsReferenceUVal())
NewValue.Value = Val.getValue();
if (auto Val = FormValue.value().getAsReferenceUVal())
NewValue.Value = Val.value();
break;
case dwarf::DW_FORM_exprloc:
case dwarf::DW_FORM_block:
case dwarf::DW_FORM_block1:
case dwarf::DW_FORM_block2:
case dwarf::DW_FORM_block4:
if (auto Val = FormValue.getValue().getAsBlock()) {
auto BlockData = Val.getValue();
if (auto Val = FormValue.value().getAsBlock()) {
auto BlockData = Val.value();
std::copy(BlockData.begin(), BlockData.end(),
std::back_inserter(NewValue.BlockData));
}
@ -288,8 +288,8 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
case dwarf::DW_FORM_udata:
case dwarf::DW_FORM_ref_sup4:
case dwarf::DW_FORM_ref_sup8:
if (auto Val = FormValue.getValue().getAsUnsignedConstant())
NewValue.Value = Val.getValue();
if (auto Val = FormValue.value().getAsUnsignedConstant())
NewValue.Value = Val.value();
break;
case dwarf::DW_FORM_string:
if (auto Val = dwarf::toString(FormValue))
@ -297,10 +297,10 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
break;
case dwarf::DW_FORM_indirect:
indirect = true;
if (auto Val = FormValue.getValue().getAsUnsignedConstant()) {
NewValue.Value = Val.getValue();
if (auto Val = FormValue.value().getAsUnsignedConstant()) {
NewValue.Value = Val.value();
NewEntry.Values.push_back(NewValue);
Form = static_cast<dwarf::Form>(Val.getValue());
Form = static_cast<dwarf::Form>(Val.value());
}
break;
case dwarf::DW_FORM_strp:
@ -311,8 +311,8 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
case dwarf::DW_FORM_strp_sup:
case dwarf::DW_FORM_GNU_str_index:
case dwarf::DW_FORM_strx:
if (auto Val = FormValue.getValue().getAsCStringOffset())
NewValue.Value = Val.getValue();
if (auto Val = FormValue.value().getAsCStringOffset())
NewValue.Value = Val.value();
break;
case dwarf::DW_FORM_flag_present:
NewValue.Value = 1;


@ -35,7 +35,7 @@ void OptionalWorksInConstexpr() {
constexpr Optional<int> y2{3};
static_assert(y1.value() == y2.value() && y1.value() == 3,
"Construction with value and getValue() are constexpr");
static_assert(y1.getValue() == y2.getValue() && y1.getValue() == 3,
static_assert(y1.value() == y2.value() && y1.value() == 3,
"Construction with value and getValue() are constexpr");
static_assert(Optional<int>{3} >= 2 && Optional<int>{1} < Optional<int>{2},
"Comparisons work in constexpr");


@ -75,11 +75,11 @@ TEST_F(BlockFrequencyInfoTest, Basic) {
EXPECT_EQ(BB0Freq, BB1Freq + BB2Freq);
EXPECT_EQ(BB0Freq, BB3Freq);
EXPECT_EQ(BFI.getBlockProfileCount(&BB0).getValue(), UINT64_C(100));
EXPECT_EQ(BFI.getBlockProfileCount(BB3).getValue(), UINT64_C(100));
EXPECT_EQ(BFI.getBlockProfileCount(BB1).getValue(),
EXPECT_EQ(BFI.getBlockProfileCount(&BB0).value(), UINT64_C(100));
EXPECT_EQ(BFI.getBlockProfileCount(BB3).value(), UINT64_C(100));
EXPECT_EQ(BFI.getBlockProfileCount(BB1).value(),
(100 * BB1Freq + BB0Freq / 2) / BB0Freq);
EXPECT_EQ(BFI.getBlockProfileCount(BB2).getValue(),
EXPECT_EQ(BFI.getBlockProfileCount(BB2).value(),
(100 * BB2Freq + BB0Freq / 2) / BB0Freq);
// Scale the frequencies of BB0, BB1 and BB2 by a factor of two.


@ -1191,14 +1191,13 @@ TEST_F(MemorySSATest, TestStoreMayAlias) {
EXPECT_EQ(MemDef->isOptimized(), true)
<< "Store " << I << " was not optimized";
if (I == 1 || I == 3 || I == 4)
EXPECT_EQ(MemDef->getOptimizedAccessType().getValue(),
AliasResult::MayAlias)
EXPECT_EQ(MemDef->getOptimizedAccessType().value(), AliasResult::MayAlias)
<< "Store " << I << " doesn't have the correct alias information";
else if (I == 0 || I == 2)
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I << " doesn't have the correct alias information";
else
EXPECT_EQ(MemDef->getOptimizedAccessType().getValue(),
EXPECT_EQ(MemDef->getOptimizedAccessType().value(),
AliasResult::MustAlias)
<< "Store " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get


@ -76,7 +76,7 @@ protected:
const auto OptInfo = VFABI::tryDemangleForVFABI(MangledName, *(M.get()));
if (OptInfo) {
Info = OptInfo.getValue();
Info = OptInfo.value();
return true;
}


@ -84,156 +84,156 @@ TEST_F(AArch64GISelMITest, FoldBinOp) {
ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAddInt.has_value());
EXPECT_EQ(25ULL, FoldGAddInt.getValue().getLimitedValue());
EXPECT_EQ(25ULL, FoldGAddInt.value().getLimitedValue());
Optional<APInt> FoldGAddMix =
ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAddMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGAddMix.getValue().getLimitedValue());
EXPECT_EQ(1073741840ULL, FoldGAddMix.value().getLimitedValue());
// Test G_AND folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGAndInt =
ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAndInt.has_value());
EXPECT_EQ(0ULL, FoldGAndInt.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGAndInt.value().getLimitedValue());
Optional<APInt> FoldGAndMix =
ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst2.getReg(0),
MIBFCst1.getReg(0), *MRI);
EXPECT_TRUE(FoldGAndMix.has_value());
EXPECT_EQ(1ULL, FoldGAndMix.getValue().getLimitedValue());
EXPECT_EQ(1ULL, FoldGAndMix.value().getLimitedValue());
// Test G_ASHR folding Integer + Mixed cases
Optional<APInt> FoldGAShrInt =
ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAShrInt.has_value());
EXPECT_EQ(0ULL, FoldGAShrInt.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGAShrInt.value().getLimitedValue());
Optional<APInt> FoldGAShrMix =
ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBFCst2.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAShrMix.has_value());
EXPECT_EQ(2097152ULL, FoldGAShrMix.getValue().getLimitedValue());
EXPECT_EQ(2097152ULL, FoldGAShrMix.value().getLimitedValue());
// Test G_LSHR folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGLShrInt =
ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGLShrInt.has_value());
EXPECT_EQ(0ULL, FoldGLShrInt.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGLShrInt.value().getLimitedValue());
Optional<APInt> FoldGLShrMix =
ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBFCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGLShrMix.has_value());
EXPECT_EQ(2080768ULL, FoldGLShrMix.getValue().getLimitedValue());
EXPECT_EQ(2080768ULL, FoldGLShrMix.value().getLimitedValue());
// Test G_MUL folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGMulInt =
ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGMulInt.has_value());
EXPECT_EQ(144ULL, FoldGMulInt.getValue().getLimitedValue());
EXPECT_EQ(144ULL, FoldGMulInt.value().getLimitedValue());
Optional<APInt> FoldGMulMix =
ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGMulMix.has_value());
EXPECT_EQ(0ULL, FoldGMulMix.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGMulMix.value().getLimitedValue());
// Test G_OR folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGOrInt =
ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGOrInt.has_value());
EXPECT_EQ(25ULL, FoldGOrInt.getValue().getLimitedValue());
EXPECT_EQ(25ULL, FoldGOrInt.value().getLimitedValue());
Optional<APInt> FoldGOrMix =
ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGOrMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGOrMix.getValue().getLimitedValue());
EXPECT_EQ(1073741840ULL, FoldGOrMix.value().getLimitedValue());
// Test G_SHL folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGShlInt =
ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGShlInt.has_value());
EXPECT_EQ(8192ULL, FoldGShlInt.getValue().getLimitedValue());
EXPECT_EQ(8192ULL, FoldGShlInt.value().getLimitedValue());
Optional<APInt> FoldGShlMix =
ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGShlMix.has_value());
EXPECT_EQ(0ULL, FoldGShlMix.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGShlMix.value().getLimitedValue());
// Test G_SUB folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGSubInt =
ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSubInt.has_value());
EXPECT_EQ(7ULL, FoldGSubInt.getValue().getLimitedValue());
EXPECT_EQ(7ULL, FoldGSubInt.value().getLimitedValue());
Optional<APInt> FoldGSubMix =
ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSubMix.has_value());
EXPECT_EQ(3221225488ULL, FoldGSubMix.getValue().getLimitedValue());
EXPECT_EQ(3221225488ULL, FoldGSubMix.value().getLimitedValue());
// Test G_XOR folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGXorInt =
ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGXorInt.has_value());
EXPECT_EQ(25ULL, FoldGXorInt.getValue().getLimitedValue());
EXPECT_EQ(25ULL, FoldGXorInt.value().getLimitedValue());
Optional<APInt> FoldGXorMix =
ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGXorMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGXorMix.getValue().getLimitedValue());
EXPECT_EQ(1073741840ULL, FoldGXorMix.value().getLimitedValue());
// Test G_UDIV folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGUdivInt =
ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUdivInt.has_value());
EXPECT_EQ(1ULL, FoldGUdivInt.getValue().getLimitedValue());
EXPECT_EQ(1ULL, FoldGUdivInt.value().getLimitedValue());
Optional<APInt> FoldGUdivMix =
ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUdivMix.has_value());
EXPECT_EQ(0ULL, FoldGUdivMix.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGUdivMix.value().getLimitedValue());
// Test G_SDIV folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGSdivInt =
ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSdivInt.has_value());
EXPECT_EQ(1ULL, FoldGSdivInt.getValue().getLimitedValue());
EXPECT_EQ(1ULL, FoldGSdivInt.value().getLimitedValue());
Optional<APInt> FoldGSdivMix =
ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSdivMix.has_value());
EXPECT_EQ(0ULL, FoldGSdivMix.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGSdivMix.value().getLimitedValue());
// Test G_UREM folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGUremInt =
ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUremInt.has_value());
EXPECT_EQ(1ULL, FoldGUremInt.getValue().getLimitedValue());
EXPECT_EQ(1ULL, FoldGUremInt.value().getLimitedValue());
Optional<APInt> FoldGUremMix =
ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUremMix.has_value());
EXPECT_EQ(0ULL, FoldGUremMix.getValue().getLimitedValue());
EXPECT_EQ(0ULL, FoldGUremMix.value().getLimitedValue());
// Test G_SREM folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGSremInt =
ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSremInt.has_value());
EXPECT_EQ(7ULL, FoldGSremInt.getValue().getLimitedValue());
EXPECT_EQ(7ULL, FoldGSremInt.value().getLimitedValue());
Optional<APInt> FoldGSremMix =
ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSremMix.has_value());
EXPECT_EQ(16ULL, FoldGSremMix.getValue().getLimitedValue());
EXPECT_EQ(16ULL, FoldGSremMix.value().getLimitedValue());
}
} // namespace


@ -253,7 +253,7 @@ void TestAllForms() {
EXPECT_TRUE((bool)FormValue);
BlockDataOpt = FormValue->getAsBlock();
EXPECT_TRUE(BlockDataOpt.has_value());
ExtractedBlockData = BlockDataOpt.getValue();
ExtractedBlockData = BlockDataOpt.value();
EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@ -261,7 +261,7 @@ void TestAllForms() {
EXPECT_TRUE((bool)FormValue);
BlockDataOpt = FormValue->getAsBlock();
EXPECT_TRUE(BlockDataOpt.has_value());
ExtractedBlockData = BlockDataOpt.getValue();
ExtractedBlockData = BlockDataOpt.value();
EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@ -269,7 +269,7 @@ void TestAllForms() {
EXPECT_TRUE((bool)FormValue);
BlockDataOpt = FormValue->getAsBlock();
EXPECT_TRUE(BlockDataOpt.has_value());
ExtractedBlockData = BlockDataOpt.getValue();
ExtractedBlockData = BlockDataOpt.value();
EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@ -277,7 +277,7 @@ void TestAllForms() {
EXPECT_TRUE((bool)FormValue);
BlockDataOpt = FormValue->getAsBlock();
EXPECT_TRUE(BlockDataOpt.has_value());
ExtractedBlockData = BlockDataOpt.getValue();
ExtractedBlockData = BlockDataOpt.value();
EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@ -287,7 +287,7 @@ void TestAllForms() {
EXPECT_TRUE((bool)FormValue);
BlockDataOpt = FormValue->getAsBlock();
EXPECT_TRUE(BlockDataOpt.has_value());
ExtractedBlockData = BlockDataOpt.getValue();
ExtractedBlockData = BlockDataOpt.value();
EXPECT_EQ(ExtractedBlockData.size(), 16u);
EXPECT_TRUE(memcmp(ExtractedBlockData.data(), Data16, 16) == 0);
}
@ -989,21 +989,21 @@ template <uint16_t Version, class AddrType> void TestAddresses() {
EXPECT_FALSE((bool)OptU64);
} else {
EXPECT_TRUE((bool)OptU64);
EXPECT_EQ(OptU64.getValue(), ActualHighPC);
EXPECT_EQ(OptU64.value(), ActualHighPC);
}
// Get the high PC as an unsigned constant. This should succeed if the high PC
// was encoded as an offset and fail if the high PC was encoded as an address.
OptU64 = toUnsigned(SubprogramDieLowHighPC.find(DW_AT_high_pc));
if (SupportsHighPCAsOffset) {
EXPECT_TRUE((bool)OptU64);
EXPECT_EQ(OptU64.getValue(), ActualHighPCOffset);
EXPECT_EQ(OptU64.value(), ActualHighPCOffset);
} else {
EXPECT_FALSE((bool)OptU64);
}
OptU64 = SubprogramDieLowHighPC.getHighPC(ActualLowPC);
EXPECT_TRUE((bool)OptU64);
EXPECT_EQ(OptU64.getValue(), ActualHighPC);
EXPECT_EQ(OptU64.value(), ActualHighPC);
EXPECT_TRUE(SubprogramDieLowHighPC.getLowAndHighPC(LowPC, HighPC, SectionIndex));
EXPECT_EQ(LowPC, ActualLowPC);


@ -79,16 +79,16 @@ TEST(DWARFFormValue, SignedConstantForms) {
auto Sign2 = createDataXFormValue<uint16_t>(DW_FORM_data2, -12345);
auto Sign4 = createDataXFormValue<uint32_t>(DW_FORM_data4, -123456789);
auto Sign8 = createDataXFormValue<uint64_t>(DW_FORM_data8, -1);
EXPECT_EQ(Sign1.getAsSignedConstant().getValue(), -123);
EXPECT_EQ(Sign2.getAsSignedConstant().getValue(), -12345);
EXPECT_EQ(Sign4.getAsSignedConstant().getValue(), -123456789);
EXPECT_EQ(Sign8.getAsSignedConstant().getValue(), -1);
EXPECT_EQ(Sign1.getAsSignedConstant().value(), -123);
EXPECT_EQ(Sign2.getAsSignedConstant().value(), -12345);
EXPECT_EQ(Sign4.getAsSignedConstant().value(), -123456789);
EXPECT_EQ(Sign8.getAsSignedConstant().value(), -1);
// Check that we can handle big positive values, but that we return
// an error just over the limit.
auto UMax = createULEBFormValue(LLONG_MAX);
auto TooBig = createULEBFormValue(uint64_t(LLONG_MAX) + 1);
EXPECT_EQ(UMax.getAsSignedConstant().getValue(), LLONG_MAX);
EXPECT_EQ(UMax.getAsSignedConstant().value(), LLONG_MAX);
EXPECT_EQ(TooBig.getAsSignedConstant().has_value(), false);
// Sanity check some other forms.
@ -100,14 +100,14 @@ TEST(DWARFFormValue, SignedConstantForms) {
auto LEBMax = createSLEBFormValue(LLONG_MAX);
auto LEB1 = createSLEBFormValue(-42);
auto LEB2 = createSLEBFormValue(42);
EXPECT_EQ(Data1.getAsSignedConstant().getValue(), 120);
EXPECT_EQ(Data2.getAsSignedConstant().getValue(), 32000);
EXPECT_EQ(Data4.getAsSignedConstant().getValue(), 2000000000);
EXPECT_EQ(Data8.getAsSignedConstant().getValue(), 0x1234567812345678LL);
EXPECT_EQ(LEBMin.getAsSignedConstant().getValue(), LLONG_MIN);
EXPECT_EQ(LEBMax.getAsSignedConstant().getValue(), LLONG_MAX);
EXPECT_EQ(LEB1.getAsSignedConstant().getValue(), -42);
EXPECT_EQ(LEB2.getAsSignedConstant().getValue(), 42);
EXPECT_EQ(Data1.getAsSignedConstant().value(), 120);
EXPECT_EQ(Data2.getAsSignedConstant().value(), 32000);
EXPECT_EQ(Data4.getAsSignedConstant().value(), 2000000000);
EXPECT_EQ(Data8.getAsSignedConstant().value(), 0x1234567812345678LL);
EXPECT_EQ(LEBMin.getAsSignedConstant().value(), LLONG_MIN);
EXPECT_EQ(LEBMax.getAsSignedConstant().value(), LLONG_MAX);
EXPECT_EQ(LEB1.getAsSignedConstant().value(), -42);
EXPECT_EQ(LEB2.getAsSignedConstant().value(), 42);
// Data16 is a little tricky.
char Cksum[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};


@ -766,8 +766,8 @@ TEST_F(FileCheckTest, NumericVariable) {
ASSERT_TRUE(Value);
EXPECT_EQ(925, cantFail(Value->getSignedValue()));
// getStringValue should return the same memory not just the same characters.
EXPECT_EQ(StringValue.begin(), FooVar.getStringValue().getValue().begin());
EXPECT_EQ(StringValue.end(), FooVar.getStringValue().getValue().end());
EXPECT_EQ(StringValue.begin(), FooVar.getStringValue().value().begin());
EXPECT_EQ(StringValue.end(), FooVar.getStringValue().value().end());
EvalResult = FooVarUse.eval();
ASSERT_THAT_EXPECTED(EvalResult, Succeeded());
EXPECT_EQ(925, cantFail(EvalResult->getSignedValue()));


@ -1045,52 +1045,51 @@ TEST_F(DILocationTest, cloneTemporary) {
}
TEST_F(DILocationTest, discriminatorEncoding) {
EXPECT_EQ(0U, DILocation::encodeDiscriminator(0, 0, 0).getValue());
EXPECT_EQ(0U, DILocation::encodeDiscriminator(0, 0, 0).value());
// Encode base discriminator as a component: lsb is 0, then the value.
// The other components are all absent, so we leave all the other bits 0.
EXPECT_EQ(2U, DILocation::encodeDiscriminator(1, 0, 0).getValue());
EXPECT_EQ(2U, DILocation::encodeDiscriminator(1, 0, 0).value());
// Base discriminator component is empty, so lsb is 1. Next component is not
// empty, so its lsb is 0, then its value (1). Next component is empty.
// So the bit pattern is 101.
EXPECT_EQ(5U, DILocation::encodeDiscriminator(0, 1, 0).getValue());
EXPECT_EQ(5U, DILocation::encodeDiscriminator(0, 1, 0).value());
// First 2 components are empty, so the bit pattern is 11. Then the
// next component - ending up with 1011.
EXPECT_EQ(0xbU, DILocation::encodeDiscriminator(0, 0, 1).getValue());
EXPECT_EQ(0xbU, DILocation::encodeDiscriminator(0, 0, 1).value());
// The bit pattern for the first 2 components is 11. The next bit is 0,
// because the last component is not empty. We have 29 bits usable for
// encoding, but we cap it at 12 bits uniformously for all components. We
// encode the last component over 14 bits.
EXPECT_EQ(0xfffbU, DILocation::encodeDiscriminator(0, 0, 0xfff).getValue());
EXPECT_EQ(0xfffbU, DILocation::encodeDiscriminator(0, 0, 0xfff).value());
EXPECT_EQ(0x102U, DILocation::encodeDiscriminator(1, 1, 0).getValue());
EXPECT_EQ(0x102U, DILocation::encodeDiscriminator(1, 1, 0).value());
EXPECT_EQ(0x13eU, DILocation::encodeDiscriminator(0x1f, 1, 0).getValue());
EXPECT_EQ(0x13eU, DILocation::encodeDiscriminator(0x1f, 1, 0).value());
EXPECT_EQ(0x87feU, DILocation::encodeDiscriminator(0x1ff, 1, 0).getValue());
EXPECT_EQ(0x87feU, DILocation::encodeDiscriminator(0x1ff, 1, 0).value());
EXPECT_EQ(0x1f3eU, DILocation::encodeDiscriminator(0x1f, 0x1f, 0).getValue());
EXPECT_EQ(0x1f3eU, DILocation::encodeDiscriminator(0x1f, 0x1f, 0).value());
EXPECT_EQ(0x3ff3eU,
DILocation::encodeDiscriminator(0x1f, 0x1ff, 0).getValue());
EXPECT_EQ(0x3ff3eU, DILocation::encodeDiscriminator(0x1f, 0x1ff, 0).value());
EXPECT_EQ(0x1ff87feU,
DILocation::encodeDiscriminator(0x1ff, 0x1ff, 0).getValue());
DILocation::encodeDiscriminator(0x1ff, 0x1ff, 0).value());
EXPECT_EQ(0xfff9f3eU,
DILocation::encodeDiscriminator(0x1f, 0x1f, 0xfff).getValue());
DILocation::encodeDiscriminator(0x1f, 0x1f, 0xfff).value());
EXPECT_EQ(0xffc3ff3eU,
DILocation::encodeDiscriminator(0x1f, 0x1ff, 0x1ff).getValue());
DILocation::encodeDiscriminator(0x1f, 0x1ff, 0x1ff).value());
EXPECT_EQ(0xffcf87feU,
DILocation::encodeDiscriminator(0x1ff, 0x1f, 0x1ff).getValue());
DILocation::encodeDiscriminator(0x1ff, 0x1f, 0x1ff).value());
EXPECT_EQ(0xe1ff87feU,
DILocation::encodeDiscriminator(0x1ff, 0x1ff, 7).getValue());
DILocation::encodeDiscriminator(0x1ff, 0x1ff, 7).value());
}
TEST_F(DILocationTest, discriminatorEncodingNegativeTests) {
@ -1113,36 +1112,36 @@ TEST_F(DILocationTest, discriminatorSpecialCases) {
EXPECT_EQ(0U, L1->getBaseDiscriminator());
EXPECT_EQ(1U, L1->getDuplicationFactor());
EXPECT_EQ(L1, L1->cloneWithBaseDiscriminator(0).getValue());
EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(0).getValue());
EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(1).getValue());
EXPECT_EQ(L1, L1->cloneWithBaseDiscriminator(0).value());
EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(0).value());
EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(1).value());
auto L2 = L1->cloneWithBaseDiscriminator(1).getValue();
auto L2 = L1->cloneWithBaseDiscriminator(1).value();
EXPECT_EQ(0U, L1->getBaseDiscriminator());
EXPECT_EQ(1U, L1->getDuplicationFactor());
EXPECT_EQ(1U, L2->getBaseDiscriminator());
EXPECT_EQ(1U, L2->getDuplicationFactor());
auto L3 = L2->cloneByMultiplyingDuplicationFactor(2).getValue();
auto L3 = L2->cloneByMultiplyingDuplicationFactor(2).value();
EXPECT_EQ(1U, L3->getBaseDiscriminator());
EXPECT_EQ(2U, L3->getDuplicationFactor());
EXPECT_EQ(L2, L2->cloneByMultiplyingDuplicationFactor(1).getValue());
EXPECT_EQ(L2, L2->cloneByMultiplyingDuplicationFactor(1).value());
auto L4 = L3->cloneByMultiplyingDuplicationFactor(4).getValue();
auto L4 = L3->cloneByMultiplyingDuplicationFactor(4).value();
EXPECT_EQ(1U, L4->getBaseDiscriminator());
EXPECT_EQ(8U, L4->getDuplicationFactor());
auto L5 = L4->cloneWithBaseDiscriminator(2).getValue();
auto L5 = L4->cloneWithBaseDiscriminator(2).value();
EXPECT_EQ(2U, L5->getBaseDiscriminator());
EXPECT_EQ(8U, L5->getDuplicationFactor());
// Check extreme cases
auto L6 = L1->cloneWithBaseDiscriminator(0xfff).getValue();
auto L6 = L1->cloneWithBaseDiscriminator(0xfff).value();
EXPECT_EQ(0xfffU, L6->getBaseDiscriminator());
EXPECT_EQ(0xfffU, L6->cloneByMultiplyingDuplicationFactor(0xfff)
.getValue()
.value()
->getDuplicationFactor());
// Check we return None for unencodable cases.

Some files were not shown because too many files have changed in this diff.
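For context only, and not part of the diff above: a minimal, hedged sketch of the one-line pattern this commit applies mechanically across the tree. It assumes an LLVM 15-era checkout in which llvm::Optional still provides both the old getValue() accessor and the std::optional-style value() spelling; the variable names here are illustrative and do not come from the commit.

```cpp
// Illustrative snippet (not from the commit): the getValue() -> value() rename.
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::Optional<unsigned> MaybeWeight = 42;

  // Before this commit, call sites read the payload with getValue():
  //   unsigned W = MaybeWeight.getValue();
  // After it, they use the std::optional-compatible spelling:
  unsigned W = MaybeWeight.value();

  llvm::errs() << "weight = " << W << "\n";
  return 0;
}
```

Since value() is a drop-in alias for getValue(), the rename preserves behavior, which is why the commit is marked NFC.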