[Target] llvm::Optional => std::optional
The updated functions are mostly internal, with a few exceptions (virtual functions in TargetInstrInfo.h and TargetRegisterInfo.h). To minimize changes to LLVMCodeGen, GlobalISel files are skipped.

https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
parent 7f773ce6fc
commit b0df70403d
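Every hunk below applies the same mechanical rewrite described in the deprecation thread linked above: llvm::Optional<T> becomes std::optional<T>, llvm::None becomes std::nullopt, and the deprecated hasValue()/getValue()/getValueOr() become has_value()/value()/value_or(). A minimal, self-contained sketch of what callers see before and after — findSlot is a made-up helper for illustration, not code from this patch:

    #include <cstdio>
    #include <optional>

    // Returns a value or nothing; previously this would have been
    // llvm::Optional<int>, with llvm::None for the empty case.
    static std::optional<int> findSlot(bool HasSlot) {
      if (HasSlot)
        return 42;         // implicit wrap, same as with llvm::Optional
      return std::nullopt; // was: return llvm::None;
    }

    int main() {
      std::optional<int> Slot = findSlot(true);
      if (Slot)                            // contextual bool test, unchanged
        std::printf("slot = %d\n", *Slot); // *O and O-> work as before
      // has_value()/value()/value_or() replace the deprecated
      // hasValue()/getValue()/getValueOr().
      return Slot.value_or(0) == 42 ? 0 : 1;
    }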
@@ -21,17 +21,6 @@
 #include <vector>
 
 namespace llvm {
-
-template <typename T, typename = decltype(std::declval<raw_ostream &>()
-                                          << std::declval<const T &>())>
-raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
-  if (O)
-    OS << *O;
-  else
-    OS << std::nullopt;
-  return OS;
-}
-
 namespace bolt {
 
 // NOTE: using SmallVector for instruction list results in a memory regression.
@@ -23,6 +23,7 @@
 #include "llvm/Support/type_traits.h"
 #include <cassert>
 #include <new>
+#include <optional>
 #include <utility>
 
 namespace llvm {
@@ -490,6 +491,16 @@ raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
   return OS;
 }
 
+template <typename T, typename = decltype(std::declval<raw_ostream &>()
+                                          << std::declval<const T &>())>
+raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
+  if (O)
+    OS << *O;
+  else
+    OS << std::nullopt;
+  return OS;
+}
+
 } // end namespace llvm
 
 #endif // LLVM_ADT_OPTIONAL_H
@@ -754,7 +754,7 @@ public:
     ///
     /// Note: This hook is guaranteed to be called from the innermost to the
     /// outermost prologue of the loop being software pipelined.
-    virtual Optional<bool>
+    virtual std::optional<bool>
     createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                     SmallVectorImpl<MachineOperand> &Cond) = 0;
 
@@ -1009,7 +1009,7 @@ protected:
   /// If the specific machine instruction is a instruction that moves/copies
   /// value from one register to another register return destination and source
   /// registers as machine operands.
-  virtual Optional<DestSourcePair>
+  virtual std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const {
     return std::nullopt;
   }
@@ -1032,7 +1032,7 @@ public:
   /// For COPY-instruction the method naturally returns destination and source
   /// registers as machine operands, for all other instructions the method calls
   /// target-dependent implementation.
-  Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
+  std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
     if (MI.isCopy()) {
       return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
     }
@@ -1043,8 +1043,8 @@ public:
   /// immediate value and a physical register, and stores the result in
   /// the given physical register \c Reg, return a pair of the source
   /// register and the offset which has been added.
-  virtual Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                              Register Reg) const {
+  virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                                   Register Reg) const {
     return std::nullopt;
   }
 
@@ -1380,7 +1380,7 @@ public:
   /// MachineInstr that is accessing memory. These values are returned as a
   /// struct ExtAddrMode which contains all relevant information to make up the
   /// address.
-  virtual Optional<ExtAddrMode>
+  virtual std::optional<ExtAddrMode>
   getAddrModeFromMemoryOp(const MachineInstr &MemI,
                           const TargetRegisterInfo *TRI) const {
     return std::nullopt;
@@ -1984,8 +1984,8 @@ public:
   /// Produce the expression describing the \p MI loading a value into
   /// the physical register \p Reg. This hook should only be used with
   /// \p MIs belonging to VReg-less functions.
-  virtual Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                         Register Reg) const;
+  virtual std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const;
 
   /// Given the generic extension instruction \p ExtMI, returns true if this
   /// extension is a likely candidate for being folded into an another
@@ -531,7 +531,7 @@ public:
   /// The absence of an explanation does not mean that the register is not
   /// reserved (meaning, you should check that PhysReg is in fact reserved
   /// before calling this).
-  virtual llvm::Optional<std::string>
+  virtual std::optional<std::string>
   explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
     return {};
   }
@@ -405,7 +405,7 @@ void AsmPrinter::emitInlineAsm(const MachineInstr *MI) const {
         DiagnosticInfoInlineAsm(LocCookie, Note, DiagnosticSeverity::DS_Note));
 
     for (const Register RR : RestrRegs) {
-      if (llvm::Optional<std::string> reason =
+      if (std::optional<std::string> reason =
               TRI->explainReservedReg(*MF, RR)) {
         MMI->getModule()->getContext().diagnose(DiagnosticInfoInlineAsm(
             LocCookie, *reason, DiagnosticSeverity::DS_Note));
@@ -88,14 +88,14 @@ static cl::opt<bool> MCPUseCopyInstr("mcp-use-is-copy-instr", cl::init(false),
 
 namespace {
 
-static Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI,
-                                            const TargetInstrInfo &TII,
-                                            bool UseCopyInstr) {
+static std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI,
+                                                 const TargetInstrInfo &TII,
+                                                 bool UseCopyInstr) {
   if (UseCopyInstr)
     return TII.isCopyInstr(MI);
 
   if (MI.isCopy())
-    return Optional<DestSourcePair>(
+    return std::optional<DestSourcePair>(
         DestSourcePair{MI.getOperand(0), MI.getOperand(1)});
 
   return std::nullopt;
@@ -137,7 +137,7 @@ public:
       auto I = Copies.find(*RUI);
       if (I != Copies.end()) {
         if (MachineInstr *MI = I->second.MI) {
-          Optional<DestSourcePair> CopyOperands =
+          std::optional<DestSourcePair> CopyOperands =
               isCopyInstr(*MI, TII, UseCopyInstr);
           assert(CopyOperands && "Expect copy");
 
@@ -166,7 +166,7 @@ public:
         // When we clobber the destination of a copy, we need to clobber the
         // whole register it defined.
         if (MachineInstr *MI = I->second.MI) {
-          Optional<DestSourcePair> CopyOperands =
+          std::optional<DestSourcePair> CopyOperands =
               isCopyInstr(*MI, TII, UseCopyInstr);
           markRegsUnavailable({CopyOperands->Destination->getReg().asMCReg()},
                               TRI);
@@ -180,7 +180,8 @@ public:
   /// Add this copy's registers into the tracker's copy maps.
   void trackCopy(MachineInstr *MI, const TargetRegisterInfo &TRI,
                  const TargetInstrInfo &TII, bool UseCopyInstr) {
-    Optional<DestSourcePair> CopyOperands = isCopyInstr(*MI, TII, UseCopyInstr);
+    std::optional<DestSourcePair> CopyOperands =
+        isCopyInstr(*MI, TII, UseCopyInstr);
     assert(CopyOperands && "Tracking non-copy?");
 
     MCRegister Src = CopyOperands->Source->getReg().asMCReg();
@@ -236,7 +237,7 @@ public:
     if (!AvailCopy)
       return nullptr;
 
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
         isCopyInstr(*AvailCopy, TII, UseCopyInstr);
     Register AvailSrc = CopyOperands->Source->getReg();
     Register AvailDef = CopyOperands->Destination->getReg();
@@ -266,7 +267,7 @@ public:
     if (!AvailCopy)
       return nullptr;
 
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
         isCopyInstr(*AvailCopy, TII, UseCopyInstr);
     Register AvailSrc = CopyOperands->Source->getReg();
     Register AvailDef = CopyOperands->Destination->getReg();
@@ -383,7 +384,7 @@ static bool isNopCopy(const MachineInstr &PreviousCopy, MCRegister Src,
                       MCRegister Def, const TargetRegisterInfo *TRI,
                       const TargetInstrInfo *TII, bool UseCopyInstr) {
 
-  Optional<DestSourcePair> CopyOperands =
+  std::optional<DestSourcePair> CopyOperands =
       isCopyInstr(PreviousCopy, *TII, UseCopyInstr);
   MCRegister PreviousSrc = CopyOperands->Source->getReg().asMCReg();
   MCRegister PreviousDef = CopyOperands->Destination->getReg().asMCReg();
@@ -422,7 +423,8 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy,
 
   // Copy was redundantly redefining either Src or Def. Remove earlier kill
   // flags between Copy and PrevCopy because the value will be reused now.
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(Copy, *TII, UseCopyInstr);
   assert(CopyOperands);
 
   Register CopyDef = CopyOperands->Destination->getReg();
@@ -439,8 +441,8 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy,
 
 bool MachineCopyPropagation::isBackwardPropagatableRegClassCopy(
     const MachineInstr &Copy, const MachineInstr &UseI, unsigned UseIdx) {
 
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(Copy, *TII, UseCopyInstr);
   Register Def = CopyOperands->Destination->getReg();
 
   if (const TargetRegisterClass *URC =
@@ -458,8 +460,8 @@ bool MachineCopyPropagation::isBackwardPropagatableRegClassCopy(
 bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
                                                        const MachineInstr &UseI,
                                                        unsigned UseIdx) {
 
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(Copy, *TII, UseCopyInstr);
   Register CopySrcReg = CopyOperands->Source->getReg();
 
   // If the new register meets the opcode register constraints, then allow
@@ -587,7 +589,7 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
     if (!Copy)
      continue;
 
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*Copy, *TII, UseCopyInstr);
    Register CopyDstReg = CopyOperands->Destination->getReg();
    const MachineOperand &CopySrc = *CopyOperands->Source;
@@ -654,7 +656,8 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {
 
   for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
     // Analyze copies (which don't overlap themselves).
-    Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, *TII, UseCopyInstr);
+    std::optional<DestSourcePair> CopyOperands =
+        isCopyInstr(MI, *TII, UseCopyInstr);
     if (CopyOperands) {
 
       Register RegSrc = CopyOperands->Source->getReg();
@@ -777,7 +780,7 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {
              MaybeDeadCopies.begin();
          DI != MaybeDeadCopies.end();) {
       MachineInstr *MaybeDead = *DI;
-      Optional<DestSourcePair> CopyOperands =
+      std::optional<DestSourcePair> CopyOperands =
           isCopyInstr(*MaybeDead, *TII, UseCopyInstr);
       MCRegister Reg = CopyOperands->Destination->getReg().asMCReg();
       assert(!MRI->isReserved(Reg));
@@ -816,7 +819,7 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {
     LLVM_DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
                MaybeDead->dump());
 
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
         isCopyInstr(*MaybeDead, *TII, UseCopyInstr);
     assert(CopyOperands);
 
@@ -845,7 +848,8 @@ static bool isBackwardPropagatableCopy(MachineInstr &MI,
                                        const MachineRegisterInfo &MRI,
                                        const TargetInstrInfo &TII,
                                        bool UseCopyInstr) {
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(MI, TII, UseCopyInstr);
   assert(CopyOperands && "MI is expected to be a COPY");
 
   Register Def = CopyOperands->Destination->getReg();
@@ -887,7 +891,7 @@ void MachineCopyPropagation::propagateDefs(MachineInstr &MI) {
     if (!Copy)
       continue;
 
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*Copy, *TII, UseCopyInstr);
    Register Def = CopyOperands->Destination->getReg();
    Register Src = CopyOperands->Source->getReg();
@@ -925,7 +929,8 @@ void MachineCopyPropagation::BackwardCopyPropagateBlock(
 
   for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
     // Ignore non-trivial COPYs.
-    Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, *TII, UseCopyInstr);
+    std::optional<DestSourcePair> CopyOperands =
+        isCopyInstr(MI, *TII, UseCopyInstr);
     if (CopyOperands && MI.getNumOperands() == 2) {
       Register DefReg = CopyOperands->Destination->getReg();
       Register SrcReg = CopyOperands->Source->getReg();
@@ -986,8 +991,7 @@ void MachineCopyPropagation::BackwardCopyPropagateBlock(
   }
 
   for (auto *Copy : MaybeDeadCopies) {
-
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
         isCopyInstr(*Copy, *TII, UseCopyInstr);
     Register Src = CopyOperands->Source->getReg();
     Register Def = CopyOperands->Destination->getReg();
@@ -880,7 +880,7 @@ void ModuloScheduleExpander::addBranches(MachineBasicBlock &PreheaderBB,
     MachineBasicBlock *Epilog = EpilogBBs[i];
 
     SmallVector<MachineOperand, 4> Cond;
-    Optional<bool> StaticallyGreater =
+    std::optional<bool> StaticallyGreater =
         LoopInfo->createTripCountGreaterCondition(j + 1, *Prolog, Cond);
     unsigned numAdded = 0;
     if (!StaticallyGreater) {
@@ -1963,7 +1963,7 @@ void PeelingModuloScheduleExpander::fixupBranches() {
     MachineBasicBlock *Epilog = *EI;
     SmallVector<MachineOperand, 4> Cond;
     TII->removeBranch(*Prolog);
-    Optional<bool> StaticallyGreater =
+    std::optional<bool> StaticallyGreater =
         LoopInfo->createTripCountGreaterCondition(TC, *Prolog, Cond);
     if (!StaticallyGreater) {
       LLVM_DEBUG(dbgs() << "Dynamic: TC > " << TC << "\n");
@@ -1171,7 +1171,7 @@ bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
   return (DefCycle != -1 && DefCycle <= 1);
 }
 
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
   const MachineFunction *MF = MI.getMF();
@@ -255,7 +255,7 @@ void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
 
 void AArch64AsmPrinter::emitFunctionHeaderComment() {
   const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
-  Optional<std::string> OutlinerString = FI->getOutliningStyle();
+  std::optional<std::string> OutlinerString = FI->getOutliningStyle();
   if (OutlinerString != std::nullopt)
     OutStreamer->getCommentOS() << ' ' << OutlinerString;
 }
@@ -39,7 +39,7 @@ class AArch64CompressJumpTables : public MachineFunctionPass {
 
   /// Returns the size in instructions of the block \p MBB, or None if we
   /// couldn't get a safe upper bound.
-  Optional<int> computeBlockSize(MachineBasicBlock &MBB);
+  std::optional<int> computeBlockSize(MachineBasicBlock &MBB);
 
   /// Gather information about the function, returns false if we can't perform
   /// this optimization for some reason.
@@ -69,7 +69,7 @@ char AArch64CompressJumpTables::ID = 0;
 INITIALIZE_PASS(AArch64CompressJumpTables, DEBUG_TYPE,
                 "AArch64 compress jump tables pass", false, false)
 
-Optional<int>
+std::optional<int>
 AArch64CompressJumpTables::computeBlockSize(MachineBasicBlock &MBB) {
   int Size = 0;
   for (const MachineInstr &MI : MBB) {
@@ -234,7 +234,7 @@ static unsigned makeTag(unsigned Dest, unsigned Base, unsigned Offset) {
   return (Dest & 0xf) | ((Base & 0xf) << 4) | ((Offset & 0x3f) << 8);
 }
 
-static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
+static std::optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
   int DestRegIdx;
   int BaseRegIdx;
   int OffsetIdx;
@@ -656,8 +656,9 @@ static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
   return LI;
 }
 
-static Optional<unsigned> getTag(const TargetRegisterInfo *TRI,
-                                 const MachineInstr &MI, const LoadInfo &LI) {
+static std::optional<unsigned> getTag(const TargetRegisterInfo *TRI,
+                                      const MachineInstr &MI,
+                                      const LoadInfo &LI) {
   unsigned Dest = LI.DestReg ? TRI->getEncodingValue(LI.DestReg) : 0;
   unsigned Base = TRI->getEncodingValue(LI.BaseReg);
   unsigned Off;
@@ -679,10 +680,10 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
   TagMap.clear();
   for (MachineBasicBlock *MBB : L.getBlocks())
     for (MachineInstr &MI : *MBB) {
-      Optional<LoadInfo> LInfo = getLoadInfo(MI);
+      std::optional<LoadInfo> LInfo = getLoadInfo(MI);
       if (!LInfo)
         continue;
-      Optional<unsigned> Tag = getTag(TRI, MI, *LInfo);
+      std::optional<unsigned> Tag = getTag(TRI, MI, *LInfo);
       if (!Tag)
         continue;
       TagMap[*Tag].push_back(&MI);
@@ -719,11 +720,11 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
       if (!TII->isStridedAccess(MI))
        continue;
 
-      Optional<LoadInfo> OptLdI = getLoadInfo(MI);
+      std::optional<LoadInfo> OptLdI = getLoadInfo(MI);
       if (!OptLdI)
         continue;
       LoadInfo LdI = *OptLdI;
-      Optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
+      std::optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
       if (!OptOldTag)
         continue;
       auto &OldCollisions = TagMap[*OptOldTag];
@@ -1475,7 +1475,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
 
   // Set tagged base pointer to the requested stack slot.
   // Ideally it should match SP value after prologue.
-  Optional<int> TBPI = AFI->getTaggedBasePointerIndex();
+  std::optional<int> TBPI = AFI->getTaggedBasePointerIndex();
   if (TBPI)
     AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
   else
@@ -3933,7 +3933,7 @@ void AArch64FrameLowering::orderFrameObjects(
   // and save one instruction when generating the base pointer because IRG does
   // not allow an immediate offset.
   const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
-  Optional<int> TBPI = AFI.getTaggedBasePointerIndex();
+  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
   if (TBPI) {
     FrameObjects[*TBPI].ObjectFirst = true;
     FrameObjects[*TBPI].GroupFirst = true;
@@ -4333,7 +4333,8 @@ static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
 
 // Returns lane if Op extracts from a two-element vector and lane is constant
 // (i.e., extractelt(<2 x Ty> %v, ConstantLane)), and None otherwise.
-static Optional<uint64_t> getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
+static std::optional<uint64_t>
+getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
   SDNode *OpNode = Op.getNode();
   if (OpNode->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
     return std::nullopt;
@@ -4656,7 +4657,7 @@ SDValue AArch64TargetLowering::getPStateSM(SelectionDAG &DAG, SDValue Chain,
                      Mask);
 }
 
-static Optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
+static std::optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
   if (auto *ES = dyn_cast<ExternalSymbolSDNode>(V)) {
     StringRef S(ES->getSymbol());
     if (S == "__arm_sme_state" || S == "__arm_tpidr2_save")
@@ -4739,8 +4740,10 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     SDValue LHS = Op.getOperand(1);
     SDValue RHS = Op.getOperand(2);
 
-    Optional<uint64_t> LHSLane = getConstantLaneNumOfExtractHalfOperand(LHS);
-    Optional<uint64_t> RHSLane = getConstantLaneNumOfExtractHalfOperand(RHS);
+    std::optional<uint64_t> LHSLane =
+        getConstantLaneNumOfExtractHalfOperand(LHS);
+    std::optional<uint64_t> RHSLane =
+        getConstantLaneNumOfExtractHalfOperand(RHS);
 
     assert((!LHSLane || *LHSLane < 2) && "Expect lane to be None or 0 or 1");
     assert((!RHSLane || *RHSLane < 2) && "Expect lane to be None or 0 or 1");
@@ -4749,9 +4752,10 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     // instructions execute on SIMD registers. So canonicalize i64 to v1i64,
     // which ISel recognizes better. For example, generate a ldr into d*
     // registers as opposed to a GPR load followed by a fmov.
-    auto TryVectorizeOperand =
-        [](SDValue N, Optional<uint64_t> NLane, Optional<uint64_t> OtherLane,
-           const SDLoc &dl, SelectionDAG &DAG) -> SDValue {
+    auto TryVectorizeOperand = [](SDValue N, std::optional<uint64_t> NLane,
+                                  std::optional<uint64_t> OtherLane,
+                                  const SDLoc &dl,
+                                  SelectionDAG &DAG) -> SDValue {
       // If the operand is an higher half itself, rewrite it to
       // extract_high_v2i64; this way aarch64_neon_pmull64 could
       // re-use the dag-combiner function with aarch64_neon_{pmull,smull,umull}.
@@ -4822,7 +4826,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     unsigned ElementSize = 128 / Op.getValueType().getVectorMinNumElements();
     unsigned NumActiveElems =
         Op.getConstantOperandVal(2) - Op.getConstantOperandVal(1);
-    Optional<unsigned> PredPattern =
+    std::optional<unsigned> PredPattern =
         getSVEPredPatternFromNumElements(NumActiveElems);
     if ((PredPattern != std::nullopt) &&
         NumActiveElems <= (MinSVEVectorSize / ElementSize))
@@ -7055,7 +7059,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
   SMEAttrs CalleeAttrs, CallerAttrs(MF.getFunction());
   if (CLI.CB)
     CalleeAttrs = SMEAttrs(*CLI.CB);
-  else if (Optional<SMEAttrs> Attrs =
+  else if (std::optional<SMEAttrs> Attrs =
                getCalleeAttrsFromExternalFunction(CLI.Callee))
     CalleeAttrs = *Attrs;
 
@@ -7084,7 +7088,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
   }
 
   SDValue PStateSM;
-  Optional<bool> RequiresSMChange = CallerAttrs.requiresSMChange(CalleeAttrs);
+  std::optional<bool> RequiresSMChange =
+      CallerAttrs.requiresSMChange(CalleeAttrs);
   if (RequiresSMChange)
     PStateSM = getPStateSM(DAG, Chain, CallerAttrs, DL, MVT::i64);
 
@@ -9011,7 +9016,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
   // splice predicate. However, we can only do this if we can guarantee that
   // there are enough elements in the vector, hence we check the index <= min
   // number of elements.
-  Optional<unsigned> PredPattern;
+  std::optional<unsigned> PredPattern;
   if (Ty.isScalableVector() && IdxVal < 0 &&
       (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
           std::nullopt) {
@@ -12561,7 +12566,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
     if (Vec0.isUndef())
       return Op;
 
-    Optional<unsigned> PredPattern =
+    std::optional<unsigned> PredPattern =
         getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
     auto PredTy = VT.changeVectorElementType(MVT::i1);
     SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
@@ -14112,7 +14117,7 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
 
   Value *PTrue = nullptr;
   if (UseScalable) {
-    Optional<unsigned> PgPattern =
+    std::optional<unsigned> PgPattern =
         getSVEPredPatternFromNumElements(FVTy->getNumElements());
     if (Subtarget->getMinSVEVectorSizeInBits() ==
             Subtarget->getMaxSVEVectorSizeInBits() &&
@@ -14304,7 +14309,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
 
   Value *PTrue = nullptr;
   if (UseScalable) {
-    Optional<unsigned> PgPattern =
+    std::optional<unsigned> PgPattern =
         getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
     if (Subtarget->getMinSVEVectorSizeInBits() ==
             Subtarget->getMaxSVEVectorSizeInBits() &&
@@ -22274,7 +22279,7 @@ static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
          "Expected legal fixed length vector!");
 
-  Optional<unsigned> PgPattern =
+  std::optional<unsigned> PgPattern =
       getSVEPredPatternFromNumElements(VT.getVectorNumElements());
   assert(PgPattern && "Unexpected element count for SVE predicate");
 
@@ -1646,7 +1646,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
 /// \returns None otherwise.
 ///
 /// Collect instructions using that flags in \p CCUseInstrs if provided.
-Optional<UsedNZCV>
+std::optional<UsedNZCV>
 llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                        const TargetRegisterInfo &TRI,
                        SmallVectorImpl<MachineInstr *> *CCUseInstrs) {
@@ -1701,7 +1701,7 @@ static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
   if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
     return false;
 
-  Optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
+  std::optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
   if (!NZVCUsed || NZVCUsed->C || NZVCUsed->V)
     return false;
 
@@ -1787,7 +1787,7 @@ static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
   if (MIUsedNZCV.C || MIUsedNZCV.V)
     return false;
 
-  Optional<UsedNZCV> NZCVUsedAfterCmp =
+  std::optional<UsedNZCV> NZCVUsedAfterCmp =
       examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs);
   // Condition flags are not used in CmpInstr basic block successors and only
   // Z or N flags allowed to be used after CmpInstr within its basic block
@@ -2214,7 +2214,7 @@ bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) {
   }
 }
 
-Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
+std::optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
   switch (Opc) {
   default: return {};
   case AArch64::PRFMui: return AArch64::PRFUMi;
@@ -2594,7 +2594,7 @@ bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
   return true;
 }
 
-Optional<ExtAddrMode>
+std::optional<ExtAddrMode>
 AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
                                           const TargetRegisterInfo *TRI) const {
   const MachineOperand *Base; // Filled with the base operand of MI.
@@ -4699,7 +4699,7 @@ int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
   // If the offset doesn't match the scale, we rewrite the instruction to
   // use the unscaled instruction instead. Likewise, if we have a negative
   // offset and there is an unscaled op to use.
-  Optional<unsigned> UnscaledOp =
+  std::optional<unsigned> UnscaledOp =
      AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
  bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
  if (useUnscaledOp &&
@@ -8080,7 +8080,7 @@ bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
   return MF.getFunction().hasMinSize();
 }
 
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
 
   // AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR reg
@@ -8100,8 +8100,8 @@ AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   return std::nullopt;
 }
 
-Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
-                                                      Register Reg) const {
+std::optional<RegImmPair>
+AArch64InstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
   int Sign = 1;
   int64_t Offset = 0;
 
@@ -8139,7 +8139,7 @@ Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
 /// If the given ORR instruction is a copy, and \p DescribedReg overlaps with
 /// the destination register then, if possible, describe the value in terms of
 /// the source register.
-static Optional<ParamLoadedValue>
+static std::optional<ParamLoadedValue>
 describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
                        const TargetInstrInfo *TII,
                        const TargetRegisterInfo *TRI) {
@@ -8174,7 +8174,7 @@ describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
   return std::nullopt;
 }
 
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
                                       Register Reg) const {
   const MachineFunction *MF = MI.getMF();
@@ -85,7 +85,7 @@ public:
 
   /// Returns the unscaled load/store for the scaled load/store opcode,
   /// if there is a corresponding unscaled variant available.
-  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);
+  static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
 
   /// Scaling factor for (scaled or unscaled) load or store.
   static int getMemScale(unsigned Opc);
@@ -133,7 +133,7 @@ public:
   /// Hint that pairing the given load or store is unprofitable.
   static void suppressLdStPair(MachineInstr &MI);
 
-  Optional<ExtAddrMode>
+  std::optional<ExtAddrMode>
   getAddrModeFromMemoryOp(const MachineInstr &MemI,
                           const TargetRegisterInfo *TRI) const override;
 
@@ -314,11 +314,11 @@ public:
   /// on Windows.
   static bool isSEHInstruction(const MachineInstr &MI);
 
-  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                      Register Reg) const override;
+  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                           Register Reg) const override;
 
-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
 
   unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;
 
@@ -339,7 +339,7 @@ protected:
   /// If the specific machine instruction is an instruction that moves/copies
   /// value from one register to another register return destination and source
   /// registers as machine operands.
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const override;
 
 private:
@@ -392,7 +392,7 @@ struct UsedNZCV {
 /// \returns None otherwise.
 ///
 /// Collect instructions using that flags in \p CCUseInstrs if provided.
-Optional<UsedNZCV>
+std::optional<UsedNZCV>
 examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                  const TargetRegisterInfo &TRI,
                  SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
@@ -99,7 +99,7 @@ using LdStPairFlags = struct LdStPairFlags {
   // If not none, RenameReg can be used to rename the result register of the
   // first store in a pair. Currently this only works when merging stores
   // forward.
-  Optional<MCPhysReg> RenameReg = std::nullopt;
+  std::optional<MCPhysReg> RenameReg = std::nullopt;
 
   LdStPairFlags() = default;
 
@@ -111,7 +111,7 @@ using LdStPairFlags = struct LdStPairFlags {
 
   void setRenameReg(MCPhysReg R) { RenameReg = R; }
   void clearRenameReg() { RenameReg = std::nullopt; }
-  Optional<MCPhysReg> getRenameReg() const { return RenameReg; }
+  std::optional<MCPhysReg> getRenameReg() const { return RenameReg; }
 };
 
 struct AArch64LoadStoreOpt : public MachineFunctionPass {
@@ -846,7 +846,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
 
   bool MergeForward = Flags.getMergeForward();
 
-  Optional<MCPhysReg> RenameReg = Flags.getRenameReg();
+  std::optional<MCPhysReg> RenameReg = Flags.getRenameReg();
   if (MergeForward && RenameReg) {
     MCRegister RegToRename = getLdStRegOp(*I).getReg();
     DefinedInBB.addReg(*RenameReg);
@@ -1470,7 +1470,7 @@ canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
 // * not used in \p UsedInBetween; UsedInBetween must contain all accessed
 // registers in the range the rename register will be used,
 // * is available in all used register classes (checked using RequiredClasses).
-static Optional<MCPhysReg> tryToFindRegisterToRename(
+static std::optional<MCPhysReg> tryToFindRegisterToRename(
     const MachineFunction &MF, Register Reg, LiveRegUnits &DefinedInBB,
     LiveRegUnits &UsedInBetween,
     SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
@@ -1719,9 +1719,10 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                       RequiredClasses, TRI)};
 
       if (*MaybeCanRename) {
-        Optional<MCPhysReg> MaybeRenameReg = tryToFindRegisterToRename(
-            *FirstMI.getParent()->getParent(), Reg, DefinedInBB,
-            UsedInBetween, RequiredClasses, TRI);
+        std::optional<MCPhysReg> MaybeRenameReg =
+            tryToFindRegisterToRename(*FirstMI.getParent()->getParent(),
+                                      Reg, DefinedInBB, UsedInBetween,
+                                      RequiredClasses, TRI);
         if (MaybeRenameReg) {
           Flags.setRenameReg(*MaybeRenameReg);
           Flags.setMergeForward(true);
@@ -65,7 +65,7 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
   using OpcodePair = std::pair<unsigned, unsigned>;
   template <typename T>
   using SplitAndOpcFunc =
-      std::function<Optional<OpcodePair>(T, unsigned, T &, T &)>;
+      std::function<std::optional<OpcodePair>(T, unsigned, T &, T &)>;
   using BuildMIFunc =
       std::function<void(MachineInstr &, OpcodePair, unsigned, unsigned,
                          Register, Register, Register)>;
@@ -173,7 +173,8 @@ bool AArch64MIPeepholeOpt::visitAND(
 
   return splitTwoPartImm<T>(
       MI,
-      [Opc](T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+      [Opc](T Imm, unsigned RegSize, T &Imm0,
+            T &Imm1) -> std::optional<OpcodePair> {
         if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
           return std::make_pair(Opc, Opc);
         return std::nullopt;
@@ -337,7 +338,7 @@ bool AArch64MIPeepholeOpt::visitADDSUB(
   return splitTwoPartImm<T>(
       MI,
       [PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
-                       T &Imm1) -> Optional<OpcodePair> {
+                       T &Imm1) -> std::optional<OpcodePair> {
         if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
           return std::make_pair(PosOpc, PosOpc);
         if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
@@ -367,8 +368,9 @@ bool AArch64MIPeepholeOpt::visitADDSSUBS(
   // that the condition code usages are only for Equal and Not Equal
   return splitTwoPartImm<T>(
       MI,
-      [PosOpcs, NegOpcs, &MI, &TRI = TRI, &MRI = MRI](
-          T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+      [PosOpcs, NegOpcs, &MI, &TRI = TRI,
+       &MRI = MRI](T Imm, unsigned RegSize, T &Imm0,
+                   T &Imm1) -> std::optional<OpcodePair> {
         OpcodePair OP;
         if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
           OP = PosOpcs;
@@ -379,7 +381,7 @@ bool AArch64MIPeepholeOpt::visitADDSSUBS(
         // Check conditional uses last since it is expensive for scanning
         // proceeding instructions
         MachineInstr &SrcMI = *MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
-        Optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
+        std::optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
         if (!NZCVUsed || NZCVUsed->C || NZCVUsed->V)
           return std::nullopt;
         return OP;
@@ -140,7 +140,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
   SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
 
   /// FrameIndex for the tagged base pointer.
-  Optional<int> TaggedBasePointerIndex;
+  std::optional<int> TaggedBasePointerIndex;
 
   /// Offset from SP-at-entry to the tagged base pointer.
   /// Tagged base pointer is set up to point to the first (lowest address)
@@ -149,7 +149,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
 
   /// OutliningStyle denotes, if a function was outined, how it was outlined,
   /// e.g. Tail Call, Thunk, or Function if none apply.
-  Optional<std::string> OutliningStyle;
+  std::optional<std::string> OutliningStyle;
 
   // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
   // CalleeSavedStackSize) to the address of the frame record.
@@ -189,10 +189,10 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
 
 
   /// True if the function need unwind information.
-  mutable Optional<bool> NeedsDwarfUnwindInfo;
+  mutable std::optional<bool> NeedsDwarfUnwindInfo;
 
   /// True if the function need asynchronous unwind information.
-  mutable Optional<bool> NeedsAsyncDwarfUnwindInfo;
+  mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
 
 public:
   explicit AArch64FunctionInfo(MachineFunction &MF);
@@ -251,7 +251,9 @@ public:
   uint64_t getLocalStackSize() const { return LocalStackSize; }
 
   void setOutliningStyle(std::string Style) { OutliningStyle = Style; }
-  Optional<std::string> getOutliningStyle() const { return OutliningStyle; }
+  std::optional<std::string> getOutliningStyle() const {
+    return OutliningStyle;
+  }
 
   void setCalleeSavedStackSize(unsigned Size) {
     CalleeSavedStackSize = Size;
@@ -407,7 +409,7 @@ public:
     return ForwardedMustTailRegParms;
   }
 
-  Optional<int> getTaggedBasePointerIndex() const {
+  std::optional<int> getTaggedBasePointerIndex() const {
     return TaggedBasePointerIndex;
   }
   void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
@@ -354,7 +354,7 @@ const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
   return CSR_AArch64_StackProbe_Windows_RegMask;
 }
 
-llvm::Optional<std::string>
+std::optional<std::string>
 AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
   if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
@@ -94,7 +94,7 @@ public:
 
   BitVector getStrictlyReservedRegs(const MachineFunction &MF) const;
   BitVector getReservedRegs(const MachineFunction &MF) const override;
-  llvm::Optional<std::string>
+  std::optional<std::string>
   explainReservedReg(const MachineFunction &MF,
                      MCRegister PhysReg) const override;
   bool isAsmClobberable(const MachineFunction &MF,
@@ -74,7 +74,7 @@ public:
   bool mayUseUncheckedLoadStore();
   void uncheckUsesOf(unsigned TaggedReg, int FI);
   void uncheckLoadsAndStores();
-  Optional<int> findFirstSlotCandidate();
+  std::optional<int> findFirstSlotCandidate();
 
   bool runOnMachineFunction(MachineFunction &Func) override;
   StringRef getPassName() const override {
@@ -238,7 +238,7 @@ static bool isSlotPreAllocated(MachineFrameInfo *MFI, int FI) {
 // eliminates a vreg (by replacing it with direct uses of IRG, which is usually
 // live almost everywhere anyway), and therefore needs to happen before
 // regalloc.
-Optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
+std::optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
   // Find the best (FI, Tag) pair to pin to offset 0.
   // Looking at the possible uses of a tagged address, the advantage of pinning
   // is:
@@ -378,7 +378,7 @@ bool AArch64StackTaggingPreRA::runOnMachineFunction(MachineFunction &Func) {
   // Find a slot that is used with zero tag offset, like ADDG #fi, 0.
   // If the base tagged pointer is set up to the address of this slot,
   // the ADDG instruction can be eliminated.
-  Optional<int> BaseSlot = findFirstSlotCandidate();
+  std::optional<int> BaseSlot = findFirstSlotCandidate();
   if (BaseSlot)
     AFI->setTaggedBasePointerIndex(*BaseSlot);
 
@@ -2581,8 +2581,8 @@ static unsigned MatchNeonVectorRegName(StringRef Name) {
 /// is a valid vector kind. Where the number of elements in a vector
 /// or the vector width is implicit or explicitly unknown (but still a
 /// valid suffix kind), 0 is used.
-static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
-                                                     RegKind VectorKind) {
+static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
+                                                          RegKind VectorKind) {
   std::pair<int, int> Res = {-1, -1};
 
   switch (VectorKind) {
@@ -2633,7 +2633,7 @@ static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
   if (Res == std::make_pair(-1, -1))
     return std::nullopt;
 
-  return Optional<std::pair<int, int>>(Res);
+  return std::optional<std::pair<int, int>>(Res);
 }
 
 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
@@ -3026,19 +3026,19 @@ AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
   auto LookupByName = [](StringRef N) {
     if (IsSVEPrefetch) {
       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
-        return Optional<unsigned>(Res->Encoding);
+        return std::optional<unsigned>(Res->Encoding);
     } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
-      return Optional<unsigned>(Res->Encoding);
-    return Optional<unsigned>();
+      return std::optional<unsigned>(Res->Encoding);
+    return std::optional<unsigned>();
   };
 
   auto LookupByEncoding = [](unsigned E) {
     if (IsSVEPrefetch) {
       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
-        return Optional<StringRef>(Res->Name);
+        return std::optional<StringRef>(Res->Name);
     } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
-      return Optional<StringRef>(Res->Name);
-    return Optional<StringRef>();
+      return std::optional<StringRef>(Res->Name);
+    return std::optional<StringRef>();
   };
   unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
 
@@ -4326,7 +4326,7 @@ AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
     return MatchOperand_NoMatch;
 
   StringRef Tail = Name.drop_front(DotPosition);
-  const Optional<std::pair<int, int>> &KindRes =
+  const std::optional<std::pair<int, int>> &KindRes =
       parseVectorKind(Tail, RegKind::Matrix);
   if (!KindRes) {
     TokError("Expected the register to be followed by element width suffix");
@@ -536,7 +536,7 @@ inline unsigned getNumElementsFromSVEPredPattern(unsigned Pattern) {
 }
 
 /// Return specific VL predicate pattern based on the number of elements.
-inline Optional<unsigned>
+inline std::optional<unsigned>
 getSVEPredPatternFromNumElements(unsigned MinNumElts) {
   switch (MinNumElts) {
   default:
@@ -839,7 +839,7 @@ inline static StringRef AArch64PACKeyIDToString(AArch64PACKey::ID KeyID) {
 }
 
 /// Return numeric key ID for 2-letter identifier string.
-inline static Optional<AArch64PACKey::ID>
+inline static std::optional<AArch64PACKey::ID>
 AArch64StringToPACKeyID(StringRef Name) {
   if (Name == "ia")
     return AArch64PACKey::IA;
@@ -49,13 +49,15 @@ SMEAttrs::SMEAttrs(const AttributeList &Attrs) {
     Bitmask |= ZA_Preserved;
 }
 
-Optional<bool> SMEAttrs::requiresSMChange(const SMEAttrs &Callee,
-                                          bool BodyOverridesInterface) const {
+std::optional<bool>
+SMEAttrs::requiresSMChange(const SMEAttrs &Callee,
+                           bool BodyOverridesInterface) const {
   // If the transition is not through a call (e.g. when considering inlining)
   // and Callee has a streaming body, then we can ignore the interface of
   // Callee.
   if (BodyOverridesInterface && Callee.hasStreamingBody()) {
-    return hasStreamingInterfaceOrBody() ? std::nullopt : Optional<bool>(true);
+    return hasStreamingInterfaceOrBody() ? std::nullopt
+                                         : std::optional<bool>(true);
   }
 
   if (Callee.hasStreamingCompatibleInterface())
@@ -68,8 +68,9 @@ public:
   /// interface. This can be useful when considering e.g. inlining, where we
   /// explicitly want the body to overrule the interface (because after inlining
   /// the interface is no longer relevant).
-  Optional<bool> requiresSMChange(const SMEAttrs &Callee,
-                                  bool BodyOverridesInterface = false) const;
+  std::optional<bool>
+  requiresSMChange(const SMEAttrs &Callee,
+                   bool BodyOverridesInterface = false) const;
 
   // Interfaces to query PSTATE.ZA
   bool hasNewZAInterface() const { return Bitmask & ZA_New; }
@@ -1053,7 +1053,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     Mov->addRegisterKilled(SrcReg, TRI);
 }
 
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   // VMOVRRD is also a copy instruction but it requires
   // special way of handling. It is more complex copy version
@@ -1069,7 +1069,7 @@ ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
 }
 
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                       Register Reg) const {
   if (auto DstSrcPair = isCopyInstrImpl(MI)) {
@@ -5555,8 +5555,8 @@ ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
   return makeArrayRef(TargetFlags);
 }
 
-Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
-                                                      Register Reg) const {
+std::optional<RegImmPair>
+ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
   int Sign = 1;
   unsigned Opcode = MI.getOpcode();
   int64_t Offset = 0;
@@ -6815,7 +6815,7 @@ public:
     return true;
   }
 
-  Optional<bool> createTripCountGreaterCondition(
+  std::optional<bool> createTripCountGreaterCondition(
       int TC, MachineBasicBlock &MBB,
       SmallVectorImpl<MachineOperand> &Cond) override {
 
@@ -104,13 +104,13 @@ protected:
   /// If the specific machine instruction is an instruction that moves/copies
   /// value from one register to another register return destination and source
   /// registers as machine operands.
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const override;
 
   /// Specialization of \ref TargetInstrInfo::describeLoadedValue, used to
   /// enhance debug entry value descriptions for ARM targets.
-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
 
 public:
   // Return whether the target has an explicit NOP encoding.
@@ -531,8 +531,8 @@ public:
     return MI.getOperand(3).getReg();
   }
 
-  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                      Register Reg) const override;
+  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                           Register Reg) const override;
 };
 
 /// Get the operands corresponding to the given \p Pred value. By default, the
@@ -98,7 +98,7 @@ private:
   int computeScale(unsigned GEPElemSize, unsigned MemoryElemSize);
   // If the value is a constant, or derived from constants via additions
   // and multilications, return its numeric value
-  Optional<int64_t> getIfConst(const Value *V);
+  std::optional<int64_t> getIfConst(const Value *V);
   // If Inst is an add instruction, check whether one summand is a
   // constant. If so, scale this constant and return it together with
   // the other summand.
@@ -335,31 +335,31 @@ int MVEGatherScatterLowering::computeScale(unsigned GEPElemSize,
   return -1;
 }
 
-Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
+std::optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
   const Constant *C = dyn_cast<Constant>(V);
   if (C && C->getSplatValue())
-    return Optional<int64_t>{C->getUniqueInteger().getSExtValue()};
+    return std::optional<int64_t>{C->getUniqueInteger().getSExtValue()};
   if (!isa<Instruction>(V))
-    return Optional<int64_t>{};
+    return std::optional<int64_t>{};
 
   const Instruction *I = cast<Instruction>(V);
   if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Or ||
       I->getOpcode() == Instruction::Mul ||
       I->getOpcode() == Instruction::Shl) {
-    Optional<int64_t> Op0 = getIfConst(I->getOperand(0));
-    Optional<int64_t> Op1 = getIfConst(I->getOperand(1));
+    std::optional<int64_t> Op0 = getIfConst(I->getOperand(0));
+    std::optional<int64_t> Op1 = getIfConst(I->getOperand(1));
     if (!Op0 || !Op1)
-      return Optional<int64_t>{};
+      return std::optional<int64_t>{};
     if (I->getOpcode() == Instruction::Add)
-      return Optional<int64_t>{Op0.value() + Op1.value()};
+      return std::optional<int64_t>{Op0.value() + Op1.value()};
     if (I->getOpcode() == Instruction::Mul)
-      return Optional<int64_t>{Op0.value() * Op1.value()};
+      return std::optional<int64_t>{Op0.value() * Op1.value()};
     if (I->getOpcode() == Instruction::Shl)
-      return Optional<int64_t>{Op0.value() << Op1.value()};
+      return std::optional<int64_t>{Op0.value() << Op1.value()};
     if (I->getOpcode() == Instruction::Or)
-      return Optional<int64_t>{Op0.value() | Op1.value()};
+      return std::optional<int64_t>{Op0.value() | Op1.value()};
   }
-  return Optional<int64_t>{};
+  return std::optional<int64_t>{};
 }
 
 // Return true if I is an Or instruction that is equivalent to an add, due to
@@ -381,7 +381,7 @@ MVEGatherScatterLowering::getVarAndConst(Value *Inst, int TypeScale) {
     return ReturnFalse;
 
   Value *Summand;
-  Optional<int64_t> Const;
+  std::optional<int64_t> Const;
   // Find out which operand the value that is increased is
   if ((Const = getIfConst(Add->getOperand(0))))
     Summand = Add->getOperand(1);
@@ -77,7 +77,7 @@ protected:
 
 public:
   struct ExtendedProperties {
-    llvm::Optional<ComponentType> ElementType;
+    std::optional<ComponentType> ElementType;
 
     // The value ordering of this enumeration is part of the DXIL ABI. Elements
    // can only be added to the end, and not removed.
@@ -751,9 +751,9 @@ public:
     return MI == EndLoop;
   }
 
-  Optional<bool>
-  createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
-                                  SmallVectorImpl<MachineOperand> &Cond) override {
+  std::optional<bool> createTripCountGreaterCondition(
+      int TC, MachineBasicBlock &MBB,
+      SmallVectorImpl<MachineOperand> &Cond) override {
     if (TripCount == -1) {
       // Check if we're done with the loop.
       Register Done = TII->createVR(MF, MVT::i1);
@@ -673,7 +673,7 @@ void M68kFrameLowering::emitEpilogue(MachineFunction &MF,
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
   MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
-  Optional<unsigned> RetOpcode;
+  std::optional<unsigned> RetOpcode;
   if (MBBI != MBB.end())
     RetOpcode = MBBI->getOpcode();
   DebugLoc DL;
@@ -110,7 +110,7 @@ MCOperand M68kMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
   return MCOperand::createExpr(Expr);
 }
 
-Optional<MCOperand>
+std::optional<MCOperand>
 M68kMCInstLower::LowerOperand(const MachineInstr *MI,
                               const MachineOperand &MO) const {
   switch (MO.getType()) {
@@ -148,7 +148,7 @@ void M68kMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
 
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
-    Optional<MCOperand> MCOp = LowerOperand(MI, MO);
+    std::optional<MCOperand> MCOp = LowerOperand(MI, MO);
 
     if (MCOp.has_value() && MCOp.value().isValid())
       OutMI.addOperand(MCOp.value());
@@ -44,8 +44,8 @@ public:
 
   MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
 
-  Optional<MCOperand> LowerOperand(const MachineInstr *MI,
-                                   const MachineOperand &MO) const;
+  std::optional<MCOperand> LowerOperand(const MachineInstr *MI,
+                                        const MachineOperand &MO) const;
 
   void Lower(const MachineInstr *MI, MCInst &OutMI) const;
 };
@@ -96,7 +96,7 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   MIB.addReg(SrcReg, getKillRegState(KillSrc));
 }
 
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 Mips16InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   if (MI.isMoveReg())
     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
@@ -106,7 +106,8 @@ protected:
   /// If the specific machine instruction is a instruction that moves/copies
   /// value from one register to another register return destination and source
   /// registers as machine operands.
-  Optional<DestSourcePair> isCopyInstrImpl(const MachineInstr &MI) const override;
+  std::optional<DestSourcePair>
+  isCopyInstrImpl(const MachineInstr &MI) const override;
 
 private:
   unsigned getAnalyzableBrOpc(unsigned Opc) const override;
@@ -933,7 +933,7 @@ MipsInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
   return makeArrayRef(Flags);
 }
 
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 MipsInstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
   DIExpression *Expr =
       DIExpression::get(MI.getMF()->getFunction().getContext(), {});
@@ -962,8 +962,8 @@ MipsInstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
   return TargetInstrInfo::describeLoadedValue(MI, Reg);
 }
 
-Optional<RegImmPair> MipsInstrInfo::isAddImmediate(const MachineInstr &MI,
-                                                   Register Reg) const {
+std::optional<RegImmPair> MipsInstrInfo::isAddImmediate(const MachineInstr &MI,
+                                                        Register Reg) const {
   // TODO: Handle cases where Reg is a super- or sub-register of the
   // destination register.
   const MachineOperand &Op0 = MI.getOperand(0);
@@ -185,11 +185,11 @@ public:
   ArrayRef<std::pair<unsigned, const char *>>
   getSerializableDirectMachineOperandTargetFlags() const override;
 
-  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                      Register Reg) const override;
+  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                           Register Reg) const override;
 
-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
 
 protected:
   bool isZeroImm(const MachineOperand &op) const;

@@ -221,7 +221,7 @@ static bool isReadOrWriteToDSPReg(const MachineInstr &MI, bool &isWrite) {
 /// We check for the common case of 'or', as it's MIPS' preferred instruction
 /// for GPRs but we have to check the operands to ensure that is the case.
 /// Other move instructions for MIPS are directly identifiable.
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 MipsSEInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   bool isDSPControlWrite = false;
   // Condition is made to match the creation of WRDSP/RDDSP copy instruction

@@ -81,7 +81,7 @@ protected:
   /// If the specific machine instruction is a instruction that moves/copies
   /// value from one register to another register return destination and source
   /// registers as machine operands.
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const override;
 
 private:

@@ -183,7 +183,7 @@ public:
   }
 
 protected:
-  llvm::Optional<MipsABIInfo> ABI;
+  std::optional<MipsABIInfo> ABI;
   MipsABIFlagsSection ABIFlagsSection;
 
   bool GPRInfoSet;

@@ -808,10 +808,12 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
 
 // Helper function template to reduce amount of boilerplate code for
 // opcode selection.
-static Optional<unsigned> pickOpcodeForVT(
-    MVT::SimpleValueType VT, unsigned Opcode_i8, unsigned Opcode_i16,
-    unsigned Opcode_i32, Optional<unsigned> Opcode_i64, unsigned Opcode_f16,
-    unsigned Opcode_f16x2, unsigned Opcode_f32, Optional<unsigned> Opcode_f64) {
+static std::optional<unsigned>
+pickOpcodeForVT(MVT::SimpleValueType VT, unsigned Opcode_i8,
+                unsigned Opcode_i16, unsigned Opcode_i32,
+                std::optional<unsigned> Opcode_i64, unsigned Opcode_f16,
+                unsigned Opcode_f16x2, unsigned Opcode_f32,
+                std::optional<unsigned> Opcode_f64) {
   switch (VT) {
   case MVT::i1:
   case MVT::i8:

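pickOpcodeForVT shows why some parameters are themselves optional: a 64-bit or f64 opcode may simply not exist for a given pattern, and the lookup then propagates "no opcode". A self-contained sketch of the idiom, with toy value types and opcode numbers rather than NVPTX's:

    #include <cstdio>
    #include <optional>

    enum SimpleVT { i8, i16, i32, i64, f32, f64 };

    static std::optional<unsigned> pickOpcode(SimpleVT VT,
                                              std::optional<unsigned> Op64) {
      switch (VT) {
      case i8:  return 100;
      case i16: return 101;
      case i32: return 102;
      case i64: return Op64; // may itself be empty for 32-bit-only patterns
      default:  return std::nullopt;
      }
    }

    int main() {
      if (auto Opc = pickOpcode(i64, std::nullopt))
        std::printf("opcode %u\n", *Opc);
      else
        std::printf("no opcode for this VT\n");
    }
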
@@ -924,7 +926,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
   SDValue N1 = N->getOperand(1);
   SDValue Addr;
   SDValue Offset, Base;
-  Optional<unsigned> Opcode;
+  std::optional<unsigned> Opcode;
   MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
 
   if (SelectDirectAddr(N1, Addr)) {

@@ -1003,7 +1005,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
   SDValue Chain = N->getOperand(0);
   SDValue Op1 = N->getOperand(1);
   SDValue Addr, Offset, Base;
-  Optional<unsigned> Opcode;
+  std::optional<unsigned> Opcode;
   SDLoc DL(N);
   SDNode *LD;
   MemSDNode *MemSD = cast<MemSDNode>(N);

@@ -1266,7 +1268,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
     Mem = cast<MemSDNode>(N);
   }
 
-  Optional<unsigned> Opcode;
+  std::optional<unsigned> Opcode;
   SDLoc DL(N);
   SDNode *LD;
   SDValue Base, Offset, Addr;

@@ -1770,7 +1772,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
   SDValue BasePtr = ST->getBasePtr();
   SDValue Addr;
   SDValue Offset, Base;
-  Optional<unsigned> Opcode;
+  std::optional<unsigned> Opcode;
   MVT::SimpleValueType SourceVT =
       Value.getNode()->getSimpleValueType(0).SimpleTy;
 

@@ -1873,7 +1875,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
   SDValue Chain = N->getOperand(0);
   SDValue Op1 = N->getOperand(1);
   SDValue Addr, Offset, Base;
-  Optional<unsigned> Opcode;
+  std::optional<unsigned> Opcode;
   SDLoc DL(N);
   SDNode *ST;
   EVT EltVT = Op1.getValueType();

@@ -2113,7 +2115,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
   EVT EltVT = Node->getValueType(0);
   EVT MemVT = Mem->getMemoryVT();
 
-  Optional<unsigned> Opcode;
+  std::optional<unsigned> Opcode;
 
   switch (VecSize) {
   default:

@@ -2198,7 +2200,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
   // Determine target opcode
   // If we have an i1, use an 8-bit store. The lowering code in
   // NVPTXISelLowering will have already emitted an upcast.
-  Optional<unsigned> Opcode = 0;
+  std::optional<unsigned> Opcode = 0;
   switch (NumElts) {
   default:
     return false;

@@ -2275,7 +2277,7 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
   // Determine target opcode
   // If we have an i1, use an 8-bit store. The lowering code in
   // NVPTXISelLowering will have already emitted an upcast.
-  Optional<unsigned> Opcode = 0;
+  std::optional<unsigned> Opcode = 0;
   switch (N->getOpcode()) {
   default:
     switch (NumElts) {

@@ -1790,7 +1790,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   }
 
   SmallVector<SDValue, 16> ProxyRegOps;
-  SmallVector<Optional<MVT>, 16> ProxyRegTruncates;
+  SmallVector<std::optional<MVT>, 16> ProxyRegTruncates;
 
   // Generate loads from param memory/moves from registers for result
   if (Ins.size() > 0) {

@@ -1873,9 +1873,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       ProxyRegOps.push_back(RetVal.getValue(j));
 
       if (needTruncate)
-        ProxyRegTruncates.push_back(Optional<MVT>(Ins[VecIdx + j].VT));
+        ProxyRegTruncates.push_back(std::optional<MVT>(Ins[VecIdx + j].VT));
       else
-        ProxyRegTruncates.push_back(Optional<MVT>());
+        ProxyRegTruncates.push_back(std::optional<MVT>());
     }
 
     Chain = RetVal.getValue(NumElts);

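One detail worth noting in this hunk: std::optional<MVT>() is a disengaged optional, exactly as Optional<MVT>() was, so the vector still records "no truncation needed" entries. A sketch with a toy element type:

    #include <cstdio>
    #include <optional>
    #include <vector>

    int main() {
      std::vector<std::optional<int>> Truncates;
      Truncates.push_back(std::optional<int>(16)); // truncate to 16 bits
      Truncates.push_back(std::optional<int>());   // no truncation needed
      for (const auto &T : Truncates) {
        if (T)
          std::printf("truncate to %d bits\n", *T);
        else
          std::printf("no truncate\n");
      }
    }
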
@@ -92,7 +92,8 @@ void PPCELFStreamer::emitInstruction(const MCInst &Inst,
   // instruction pair, return a value, otherwise return None. A true returned
   // value means the instruction is the PLDpc and a false value means it is
   // the user instruction.
-  Optional<bool> IsPartOfGOTToPCRelPair = isPartOfGOTToPCRelPair(Inst, STI);
+  std::optional<bool> IsPartOfGOTToPCRelPair =
+      isPartOfGOTToPCRelPair(Inst, STI);
 
   // User of the GOT-indirect address.
   // For example, the load that will get the relocation as follows:

@@ -196,8 +197,8 @@ void PPCELFStreamer::emitGOTToPCRelLabel(const MCInst &Inst) {
 // at the opcode and in the case of PLDpc we will return true. For the load
 // (or store) this function will return false indicating it has found the second
 // instruciton in the pair.
-Optional<bool> llvm::isPartOfGOTToPCRelPair(const MCInst &Inst,
-                                            const MCSubtargetInfo &STI) {
+std::optional<bool> llvm::isPartOfGOTToPCRelPair(const MCInst &Inst,
+                                                 const MCSubtargetInfo &STI) {
   // Need at least two operands.
   if (Inst.getNumOperands() < 2)
     return std::nullopt;

@@ -48,8 +48,8 @@ private:
 
 // Check if the instruction Inst is part of a pair of instructions that make up
 // a link time GOT PC Rel optimization.
-Optional<bool> isPartOfGOTToPCRelPair(const MCInst &Inst,
-                                      const MCSubtargetInfo &STI);
+std::optional<bool> isPartOfGOTToPCRelPair(const MCInst &Inst,
+                                           const MCSubtargetInfo &STI);
 
 MCELFStreamer *createPPCELFStreamer(MCContext &Context,
                                     std::unique_ptr<MCAsmBackend> MAB,

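The comment above describes a three-state result, which std::optional<bool> models directly: disengaged, true, or false. A standalone sketch of consuming such a value (a toy classifier, not the PPC one):

    #include <cstdio>
    #include <optional>

    static std::optional<bool> classify(int Opcode) {
      if (Opcode == 1) return true;  // first of the pair ("PLDpc"-like)
      if (Opcode == 2) return false; // the user instruction
      return std::nullopt;           // not part of a pair
    }

    int main() {
      for (int Op : {0, 1, 2}) {
        std::optional<bool> R = classify(Op);
        if (!R)
          std::puts("not part of a pair");
        else
          std::puts(*R ? "first of pair" : "second of pair");
      }
    }
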
@@ -198,8 +198,8 @@ class PPCFastISel final : public FastISel {
 
 } // end anonymous namespace
 
-static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
-  switch (Pred) {
+static std::optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
+  switch (Pred) {
   // These are not representable with any single compare.
   case CmpInst::FCMP_FALSE:
   case CmpInst::FCMP_TRUE:

@@ -770,7 +770,8 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
   // For now, just try the simplest case where it's fed by a compare.
   if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
     if (isValueAvailable(CI)) {
-      Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
+      std::optional<PPC::Predicate> OptPPCPred =
+          getComparePred(CI->getPredicate());
       if (!OptPPCPred)
         return false;
 

@@ -5555,9 +5555,9 @@ public:
     return MI == EndLoop;
   }
 
-  Optional<bool>
-  createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
-                                  SmallVectorImpl<MachineOperand> &Cond) override {
+  std::optional<bool> createTripCountGreaterCondition(
+      int TC, MachineBasicBlock &MBB,
+      SmallVectorImpl<MachineOperand> &Cond) override {
     if (TripCount == -1) {
       // Since BDZ/BDZ8 that we will insert will also decrease the ctr by 1,
      // so we don't need to generate any thing here.

@@ -8533,7 +8533,7 @@ struct NodeExtensionHelper {
   /// SExt. If \p SExt is None, this returns the source of this operand.
   /// \see ::getSource().
   SDValue getOrCreateExtendedOp(const SDNode *Root, SelectionDAG &DAG,
-                                Optional<bool> SExt) const {
+                                std::optional<bool> SExt) const {
     if (!SExt.has_value())
       return OrigOperand;
 

@@ -8623,7 +8623,7 @@ struct NodeExtensionHelper {
     }
   }
 
-  using CombineToTry = std::function<Optional<CombineResult>(
+  using CombineToTry = std::function<std::optional<CombineResult>(
       SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/,
       const NodeExtensionHelper & /*RHS*/)>;
 

@@ -8798,8 +8798,8 @@ struct CombineResult {
   unsigned TargetOpcode;
   // No value means no extension is needed. If extension is needed, the value
   // indicates if it needs to be sign extended.
-  Optional<bool> SExtLHS;
-  Optional<bool> SExtRHS;
+  std::optional<bool> SExtLHS;
+  std::optional<bool> SExtRHS;
   /// Root of the combine.
   SDNode *Root;
   /// LHS of the TargetOpcode.

@@ -8808,8 +8808,8 @@ struct CombineResult {
   NodeExtensionHelper RHS;
 
   CombineResult(unsigned TargetOpcode, SDNode *Root,
-                const NodeExtensionHelper &LHS, Optional<bool> SExtLHS,
-                const NodeExtensionHelper &RHS, Optional<bool> SExtRHS)
+                const NodeExtensionHelper &LHS, std::optional<bool> SExtLHS,
+                const NodeExtensionHelper &RHS, std::optional<bool> SExtRHS)
       : TargetOpcode(TargetOpcode), SExtLHS(SExtLHS), SExtRHS(SExtRHS),
         Root(Root), LHS(LHS), RHS(RHS) {}
 

@@ -8837,7 +8837,7 @@ struct CombineResult {
 ///
 /// \returns None if the pattern doesn't match or a CombineResult that can be
 /// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
 canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
                                  const NodeExtensionHelper &RHS, bool AllowSExt,
                                  bool AllowZExt) {

@@ -8863,7 +8863,7 @@ canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
 ///
 /// \returns None if the pattern doesn't match or a CombineResult that can be
 /// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
 canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
                              const NodeExtensionHelper &RHS) {
   return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,

@@ -8874,9 +8874,9 @@ canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
 ///
 /// \returns None if the pattern doesn't match or a CombineResult that can be
 /// used to apply the pattern.
-static Optional<CombineResult> canFoldToVW_W(SDNode *Root,
-                                             const NodeExtensionHelper &LHS,
-                                             const NodeExtensionHelper &RHS) {
+static std::optional<CombineResult>
+canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
+              const NodeExtensionHelper &RHS) {
   if (!RHS.areVLAndMaskCompatible(Root))
     return std::nullopt;
 

@@ -8899,7 +8899,7 @@ static Optional<CombineResult> canFoldToVW_W(SDNode *Root,
 ///
 /// \returns None if the pattern doesn't match or a CombineResult that can be
 /// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
 canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                     const NodeExtensionHelper &RHS) {
   return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,

@@ -8910,7 +8910,7 @@ canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
 ///
 /// \returns None if the pattern doesn't match or a CombineResult that can be
 /// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
 canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                     const NodeExtensionHelper &RHS) {
   return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/false,

@@ -8921,9 +8921,9 @@ canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
 ///
 /// \returns None if the pattern doesn't match or a CombineResult that can be
 /// used to apply the pattern.
-static Optional<CombineResult> canFoldToVW_SU(SDNode *Root,
-                                              const NodeExtensionHelper &LHS,
-                                              const NodeExtensionHelper &RHS) {
+static std::optional<CombineResult>
+canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
+               const NodeExtensionHelper &RHS) {
   if (!LHS.SupportsSExt || !RHS.SupportsZExt)
     return std::nullopt;
   if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root))

@@ -9020,7 +9020,7 @@ combineBinOp_VLToVWBinOp_VL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
 
     for (NodeExtensionHelper::CombineToTry FoldingStrategy :
          FoldingStrategies) {
-      Optional<CombineResult> Res = FoldingStrategy(N, LHS, RHS);
+      std::optional<CombineResult> Res = FoldingStrategy(N, LHS, RHS);
       if (Res) {
         Matched = true;
         CombinesToApply.push_back(*Res);

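The loop above is the common "try each strategy, collect whatever yields a value" idiom. A runnable sketch with toy strategies over ints in place of the CombineResult machinery:

    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <vector>

    using Strategy = std::function<std::optional<int>(int)>;

    int main() {
      std::vector<Strategy> FoldingStrategies = {
          [](int N) -> std::optional<int> {
            if (N % 2)
              return N * 2; // "pattern matched"
            return std::nullopt;
          },
          [](int N) -> std::optional<int> {
            if (N > 10)
              return N - 10;
            return std::nullopt;
          },
      };
      for (const Strategy &S : FoldingStrategies)
        if (std::optional<int> Res = S(15)) // engaged means a match
          std::printf("matched, result %d\n", *Res);
    }
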
@@ -11224,7 +11224,7 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
 }
 
 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
-                               Optional<unsigned> FirstMaskArgument,
+                               std::optional<unsigned> FirstMaskArgument,
                                CCState &State, const RISCVTargetLowering &TLI) {
   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
   if (RC == &RISCV::VRRegClass) {

@@ -11249,7 +11249,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
-                     Optional<unsigned> FirstMaskArgument) {
+                     std::optional<unsigned> FirstMaskArgument) {
   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
   assert(XLen == 32 || XLen == 64);
   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

@@ -11471,7 +11471,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
 }
 
 template <typename ArgTy>
-static Optional<unsigned> preAssignMask(const ArgTy &Args) {
+static std::optional<unsigned> preAssignMask(const ArgTy &Args) {
   for (const auto &ArgIdx : enumerate(Args)) {
     MVT ArgVT = ArgIdx.value().VT;
     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)

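preAssignMask is the "index of first match, or nothing" idiom. A minimal sketch assuming only the standard library; the predicate here is a plain bool rather than MVT::i1 vector detection:

    #include <cstdio>
    #include <optional>
    #include <vector>

    template <typename Range>
    static std::optional<unsigned> firstMaskIndex(const Range &Args) {
      unsigned Idx = 0;
      for (bool IsMask : Args) {
        if (IsMask)
          return Idx; // first argument satisfying the predicate
        ++Idx;
      }
      return std::nullopt; // no mask argument at all
    }

    int main() {
      std::vector<bool> Args = {false, false, true, false};
      if (std::optional<unsigned> I = firstMaskIndex(Args))
        std::printf("first mask argument at %u\n", *I);
    }
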
@@ -11487,7 +11487,7 @@ void RISCVTargetLowering::analyzeInputArgs(
   unsigned NumArgs = Ins.size();
   FunctionType *FType = MF.getFunction().getFunctionType();
 
-  Optional<unsigned> FirstMaskArgument;
+  std::optional<unsigned> FirstMaskArgument;
   if (Subtarget.hasVInstructions())
     FirstMaskArgument = preAssignMask(Ins);
 

@@ -11518,7 +11518,7 @@ void RISCVTargetLowering::analyzeOutputArgs(
     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
   unsigned NumArgs = Outs.size();
 
-  Optional<unsigned> FirstMaskArgument;
+  std::optional<unsigned> FirstMaskArgument;
   if (Subtarget.hasVInstructions())
     FirstMaskArgument = preAssignMask(Outs);
 

@@ -11703,7 +11703,7 @@ static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                             ISD::ArgFlagsTy ArgFlags, CCState &State,
                             bool IsFixed, bool IsRet, Type *OrigTy,
                             const RISCVTargetLowering &TLI,
-                            Optional<unsigned> FirstMaskArgument) {
+                            std::optional<unsigned> FirstMaskArgument) {
 
   // X5 and X6 might be used for save-restore libcall.
   static const MCPhysReg GPRList[] = {

@@ -12387,7 +12387,7 @@ bool RISCVTargetLowering::CanLowerReturn(
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
 
-  Optional<unsigned> FirstMaskArgument;
+  std::optional<unsigned> FirstMaskArgument;
   if (Subtarget.hasVInstructions())
     FirstMaskArgument = preAssignMask(Outs);
 

@@ -612,7 +612,7 @@ private:
                              ISD::ArgFlagsTy ArgFlags, CCState &State,
                              bool IsFixed, bool IsRet, Type *OrigTy,
                              const RISCVTargetLowering &TLI,
-                             Optional<unsigned> FirstMaskArgument);
+                             std::optional<unsigned> FirstMaskArgument);
 
   void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,

@@ -87,7 +87,7 @@ static bool isScalarMoveInstr(const MachineInstr &MI) {
 
 /// Get the EEW for a load or store instruction. Return None if MI is not
 /// a load or store which ignores SEW.
-static Optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
+static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   switch (getRVVMCOpcode(MI.getOpcode())) {
   default:
     return std::nullopt;

@@ -659,7 +659,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
     InstrInfo.setAVLReg(RISCV::NoRegister);
   }
 #ifndef NDEBUG
-  if (Optional<unsigned> EEW = getEEWForLoadStore(MI)) {
+  if (std::optional<unsigned> EEW = getEEWForLoadStore(MI)) {
     assert(SEW == EEW && "Initial SEW doesn't match expected EEW");
   }
 #endif

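The assert above relies on std::optional's mixed comparisons: SEW == EEW compiles with unsigned on the left and std::optional<unsigned> on the right, and an empty optional compares unequal to every value, matching llvm::Optional's behavior. Sketch:

    #include <cassert>
    #include <optional>

    int main() {
      std::optional<unsigned> EEW = 32;
      unsigned SEW = 32;
      assert(SEW == EEW && "compares against the contained value");
      EEW.reset();
      assert(!(SEW == EEW) && "an empty optional equals no value");
    }
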
@@ -1102,7 +1102,7 @@ bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
   return MI.isAsCheapAsAMove();
 }
 
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   if (MI.isMoveReg())
     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};

@@ -2383,7 +2383,7 @@ bool RISCV::isRVVSpill(const MachineInstr &MI) {
   return true;
 }
 
-Optional<std::pair<unsigned, unsigned>>
+std::optional<std::pair<unsigned, unsigned>>
 RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
   switch (Opcode) {
   default:

@@ -112,7 +112,7 @@ public:
 
   bool isAsCheapAsAMove(const MachineInstr &MI) const override;
 
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const override;
 
   bool verifyInstruction(const MachineInstr &MI,

@@ -222,7 +222,8 @@ bool isZEXT_B(const MachineInstr &MI);
 // expect to see a FrameIndex operand.
 bool isRVVSpill(const MachineInstr &MI);
 
-Optional<std::pair<unsigned, unsigned>> isRVVSpillForZvlsseg(unsigned Opcode);
+std::optional<std::pair<unsigned, unsigned>>
+isRVVSpillForZvlsseg(unsigned Opcode);
 
 bool isFaultFirstLoad(const MachineInstr &MI);
 

@@ -1721,12 +1721,12 @@ static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
 /// Lowers a builtin funtion call using the provided \p DemangledCall skeleton
 /// and external instruction \p Set.
 namespace SPIRV {
-Optional<bool> lowerBuiltin(const StringRef DemangledCall,
-                            SPIRV::InstructionSet::InstructionSet Set,
-                            MachineIRBuilder &MIRBuilder,
-                            const Register OrigRet, const Type *OrigRetTy,
-                            const SmallVectorImpl<Register> &Args,
-                            SPIRVGlobalRegistry *GR) {
+std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
+                                 SPIRV::InstructionSet::InstructionSet Set,
+                                 MachineIRBuilder &MIRBuilder,
+                                 const Register OrigRet, const Type *OrigRetTy,
+                                 const SmallVectorImpl<Register> &Args,
+                                 SPIRVGlobalRegistry *GR) {
   LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
 
   // SPIR-V type and return register.

@@ -31,12 +31,12 @@ namespace SPIRV {
 /// Register(0) otherwise.
 /// \p OrigRetTy is the type of the \p OrigRet.
 /// \p Args are the arguments of the lowered builtin call.
-Optional<bool> lowerBuiltin(const StringRef DemangledCall,
-                            InstructionSet::InstructionSet Set,
-                            MachineIRBuilder &MIRBuilder,
-                            const Register OrigRet, const Type *OrigRetTy,
-                            const SmallVectorImpl<Register> &Args,
-                            SPIRVGlobalRegistry *GR);
+std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
+                                 InstructionSet::InstructionSet Set,
+                                 MachineIRBuilder &MIRBuilder,
+                                 const Register OrigRet, const Type *OrigRetTy,
+                                 const SmallVectorImpl<Register> &Args,
+                                 SPIRVGlobalRegistry *GR);
 /// Handles the translation of the provided special opaque/builtin type \p Type
 /// to SPIR-V type. Generates the corresponding machine instructions for the
 /// target type or gets the already existing OpType<...> register from the

@@ -44,13 +44,13 @@ enum ModuleSectionType {
 
 struct Requirements {
   const bool IsSatisfiable;
-  const Optional<Capability::Capability> Cap;
+  const std::optional<Capability::Capability> Cap;
   const ExtensionList Exts;
   const unsigned MinVer; // 0 if no min version is required.
   const unsigned MaxVer; // 0 if no max version is required.
 
   Requirements(bool IsSatisfiable = false,
-               Optional<Capability::Capability> Cap = {},
+               std::optional<Capability::Capability> Cap = {},
                ExtensionList Exts = {}, unsigned MinVer = 0,
                unsigned MaxVer = 0)
       : IsSatisfiable(IsSatisfiable), Cap(Cap), Exts(Exts), MinVer(MinVer),

@@ -59,7 +59,7 @@ bool isMaskArithmetic(SDValue Op) {
 }
 
 /// \returns the VVP_* SDNode opcode corresponsing to \p OC.
-Optional<unsigned> getVVPOpcode(unsigned Opcode) {
+std::optional<unsigned> getVVPOpcode(unsigned Opcode) {
   switch (Opcode) {
   case ISD::MLOAD:
     return VEISD::VVP_LOAD;

@@ -163,7 +163,7 @@ bool isVVPReductionOp(unsigned Opcode) {
 }
 
 // Return the AVL operand position for this VVP or VEC Op.
-Optional<int> getAVLPos(unsigned Opc) {
+std::optional<int> getAVLPos(unsigned Opc) {
   // This is only available for VP SDNodes
   auto PosOpt = ISD::getVPExplicitVectorLengthIdx(Opc);
   if (PosOpt)

@@ -188,7 +188,7 @@ Optional<int> getAVLPos(unsigned Opc) {
   return std::nullopt;
 }
 
-Optional<int> getMaskPos(unsigned Opc) {
+std::optional<int> getMaskPos(unsigned Opc) {
   // This is only available for VP SDNodes
   auto PosOpt = ISD::getVPMaskIdx(Opc);
   if (PosOpt)

@@ -240,7 +240,7 @@ SDValue getMemoryPtr(SDValue Op) {
   return SDValue();
 }
 
-Optional<EVT> getIdiomaticVectorType(SDNode *Op) {
+std::optional<EVT> getIdiomaticVectorType(SDNode *Op) {
   unsigned OC = Op->getOpcode();
 
   // For memory ops -> the transfered data type

@@ -21,7 +21,7 @@
 
 namespace llvm {
 
-Optional<unsigned> getVVPOpcode(unsigned Opcode);
+std::optional<unsigned> getVVPOpcode(unsigned Opcode);
 
 bool isVVPUnaryOp(unsigned Opcode);
 bool isVVPBinaryOp(unsigned Opcode);

@@ -71,7 +71,7 @@ bool maySafelyIgnoreMask(SDValue Op);
 //
 /// AVL Functions {
 // The AVL operand position of this node.
-Optional<int> getAVLPos(unsigned);
+std::optional<int> getAVLPos(unsigned);
 
 // Whether this is a LEGALAVL node.
 bool isLegalAVL(SDValue AVL);

@@ -80,7 +80,7 @@ bool isLegalAVL(SDValue AVL);
 SDValue getNodeAVL(SDValue);
 
 // Mask position of this node.
-Optional<int> getMaskPos(unsigned);
+std::optional<int> getMaskPos(unsigned);
 
 SDValue getNodeMask(SDValue);
 

@@ -92,7 +92,7 @@ std::pair<SDValue, bool> getAnnotatedNodeAVL(SDValue);
 
 /// Node Properties {
 
-Optional<EVT> getIdiomaticVectorType(SDNode *Op);
+std::optional<EVT> getIdiomaticVectorType(SDNode *Op);
 
 SDValue getLoadStoreStride(SDValue Op, VECustomDAG &CDAG);
 

@@ -154,7 +154,7 @@ public:
 
   /// getNode {
   SDValue getNode(unsigned OC, SDVTList VTL, ArrayRef<SDValue> OpV,
-                  Optional<SDNodeFlags> Flags = std::nullopt) const {
+                  std::optional<SDNodeFlags> Flags = std::nullopt) const {
     auto N = DAG.getNode(OC, DL, VTL, OpV);
     if (Flags)
       N->setFlags(*Flags);

@@ -162,7 +162,7 @@ public:
   }
 
   SDValue getNode(unsigned OC, ArrayRef<EVT> ResVT, ArrayRef<SDValue> OpV,
-                  Optional<SDNodeFlags> Flags = std::nullopt) const {
+                  std::optional<SDNodeFlags> Flags = std::nullopt) const {
     auto N = DAG.getNode(OC, DL, ResVT, OpV);
     if (Flags)
       N->setFlags(*Flags);

@@ -170,7 +170,7 @@ public:
   }
 
   SDValue getNode(unsigned OC, EVT ResVT, ArrayRef<SDValue> OpV,
-                  Optional<SDNodeFlags> Flags = std::nullopt) const {
+                  std::optional<SDNodeFlags> Flags = std::nullopt) const {
     auto N = DAG.getNode(OC, DL, ResVT, OpV);
     if (Flags)
       N->setFlags(*Flags);

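These getNode overloads show the defaulted-optional-parameter pattern: callers may omit the flags entirely, pass std::nullopt explicitly, or pass a value. A standalone sketch with a toy Flags struct:

    #include <cstdio>
    #include <optional>

    struct Flags { bool NoWrap = false; };

    static void emit(int Opcode, std::optional<Flags> F = std::nullopt) {
      if (F)
        std::printf("op %d with flags (NoWrap=%d)\n", Opcode, F->NoWrap);
      else
        std::printf("op %d with default flags\n", Opcode);
    }

    int main() {
      emit(1);              // default argument kicks in
      emit(2, Flags{true}); // implicit conversion to std::optional<Flags>
    }
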
@@ -835,7 +835,8 @@ public:
     auto ElemTypeName = expectIdent();
     if (ElemTypeName.empty())
       return true;
-    Optional<wasm::ValType> ElemType = WebAssembly::parseType(ElemTypeName);
+    std::optional<wasm::ValType> ElemType =
+        WebAssembly::parseType(ElemTypeName);
     if (!ElemType)
       return error("Unknown type in .tabletype directive: ", ElemTypeTok);
 

@@ -83,7 +83,7 @@ bool WebAssemblyAsmTypeCheck::typeError(SMLoc ErrorLoc, const Twine &Msg) {
 }
 
 bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc,
-                                      Optional<wasm::ValType> EVT) {
+                                      std::optional<wasm::ValType> EVT) {
   if (Stack.empty()) {
     return typeError(ErrorLoc,
                      EVT ? StringRef("empty stack while popping ") +

@@ -38,7 +38,7 @@ class WebAssemblyAsmTypeCheck final {
 
   void dumpTypeStack(Twine Msg);
   bool typeError(SMLoc ErrorLoc, const Twine &Msg);
-  bool popType(SMLoc ErrorLoc, Optional<wasm::ValType> EVT);
+  bool popType(SMLoc ErrorLoc, std::optional<wasm::ValType> EVT);
   bool popRefType(SMLoc ErrorLoc);
   bool getLocal(SMLoc ErrorLoc, const MCInst &Inst, wasm::ValType &Type);
   bool checkEnd(SMLoc ErrorLoc, bool PopVals = false);

@@ -21,7 +21,7 @@
 
 using namespace llvm;
 
-Optional<wasm::ValType> WebAssembly::parseType(StringRef Type) {
+std::optional<wasm::ValType> WebAssembly::parseType(StringRef Type) {
   // FIXME: can't use StringSwitch because wasm::ValType doesn't have a
   // "invalid" value.
   if (Type == "i32")
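parseType is the classic use case for an optional return: the input may name no valid type at all, which is exactly the "invalid value" the FIXME above wishes wasm::ValType had. A runnable sketch with a toy type table, not the real wasm::ValType mapping:

    #include <cstdio>
    #include <optional>
    #include <string_view>

    enum class ValType { I32, I64, F32, F64 };

    static std::optional<ValType> parseType(std::string_view S) {
      if (S == "i32") return ValType::I32;
      if (S == "i64") return ValType::I64;
      if (S == "f32") return ValType::F32;
      if (S == "f64") return ValType::F64;
      return std::nullopt; // unknown type name
    }

    int main() {
      if (!parseType("v128"))
        std::puts("Unknown type");
    }
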
@@ -89,7 +89,7 @@ inline bool isRefType(wasm::ValType Type) {
 
 // Convert StringRef to ValType / HealType / BlockType
 
-Optional<wasm::ValType> parseType(StringRef Type);
+std::optional<wasm::ValType> parseType(StringRef Type);
 BlockType parseBlockType(StringRef Type);
 MVT parseMVT(StringRef Type);
 

@@ -50,7 +50,7 @@ using namespace llvm;
 // SelectionDAGISel::runOnMachineFunction. We have to do it in two places
 // because we want to do it while building the selection DAG for uses of alloca,
 // but not all alloca instructions are used so we have to follow up afterwards.
-Optional<unsigned>
+std::optional<unsigned>
 WebAssemblyFrameLowering::getLocalForStackObject(MachineFunction &MF,
                                                  int FrameIndex) {
   MachineFrameInfo &MFI = MF.getFrameInfo();

@@ -56,8 +56,8 @@ public:
 
   // Returns the index of the WebAssembly local to which the stack object
   // FrameIndex in MF should be allocated, or None.
-  static Optional<unsigned> getLocalForStackObject(MachineFunction &MF,
-                                                   int FrameIndex);
+  static std::optional<unsigned> getLocalForStackObject(MachineFunction &MF,
+                                                        int FrameIndex);
 
   static unsigned getSPReg(const MachineFunction &MF);
   static unsigned getFPReg(const MachineFunction &MF);

@@ -1449,7 +1449,8 @@ static bool IsWebAssemblyGlobal(SDValue Op) {
   return false;
 }
 
-static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
+static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
+                                                  SelectionDAG &DAG) {
   const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
   if (!FI)
     return std::nullopt;

@@ -1477,7 +1478,7 @@ SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
                         SN->getMemoryVT(), SN->getMemOperand());
   }
 
-  if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
+  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
     if (!Offset->isUndef())
       report_fatal_error("unexpected offset when storing to webassembly local",
                          false);

@@ -1514,7 +1515,7 @@ SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                        LN->getMemoryVT(), LN->getMemOperand());
   }
 
-  if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
+  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
     if (!Offset->isUndef())
       report_fatal_error(
           "unexpected offset when loading from webassembly local", false);

@@ -212,7 +212,7 @@ void X86DynAllocaExpander::lower(MachineInstr *MI, Lowering L) {
   bool Is64BitAlloca = MI->getOpcode() == X86::DYN_ALLOCA_64;
   assert(SlotSize == 4 || SlotSize == 8);
 
-  Optional<MachineFunction::DebugInstrOperandPair> InstrNum;
+  std::optional<MachineFunction::DebugInstrOperandPair> InstrNum;
   if (unsigned Num = MI->peekDebugInstrNum()) {
     // Operand 2 of DYN_ALLOCAs contains the stack def.
     InstrNum = {Num, 2};

@@ -580,7 +580,7 @@ void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
 void X86FrameLowering::emitStackProbe(
     MachineFunction &MF, MachineBasicBlock &MBB,
     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
-    Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
+    std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
   if (STI.isTargetWindowsCoreCLR()) {
     if (InProlog) {

@@ -1106,7 +1106,7 @@ void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
 void X86FrameLowering::emitStackProbeCall(
     MachineFunction &MF, MachineBasicBlock &MBB,
     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
-    Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
+    std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
 
   // FIXME: Add indirect thunk support and remove this.

@@ -57,7 +57,7 @@ public:
   void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                       bool InProlog,
-                      Optional<MachineFunction::DebugInstrOperandPair>
+                      std::optional<MachineFunction::DebugInstrOperandPair>
                           InstrNum = std::nullopt) const;
 
   bool stackProbeFunctionModifiesSP() const override;

@@ -208,7 +208,7 @@ private:
   void emitStackProbeCall(
       MachineFunction &MF, MachineBasicBlock &MBB,
       MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
-      Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const;
+      std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const;
 
   /// Emit target stack probe as an inline sequence.
   void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB,

@@ -3499,17 +3499,20 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
   // If we have BMI2's BZHI, we are ok with muti-use patterns.
   // Else, if we only have BMI1's BEXTR, we require one-use.
   const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
-  auto checkUses = [AllowExtraUsesByDefault](SDValue Op, unsigned NUses,
-                                             Optional<bool> AllowExtraUses) {
+  auto checkUses = [AllowExtraUsesByDefault](
+                       SDValue Op, unsigned NUses,
+                       std::optional<bool> AllowExtraUses) {
     return AllowExtraUses.value_or(AllowExtraUsesByDefault) ||
            Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
   };
   auto checkOneUse = [checkUses](SDValue Op,
-                                 Optional<bool> AllowExtraUses = std::nullopt) {
+                                 std::optional<bool> AllowExtraUses =
+                                     std::nullopt) {
     return checkUses(Op, 1, AllowExtraUses);
   };
   auto checkTwoUse = [checkUses](SDValue Op,
-                                 Optional<bool> AllowExtraUses = std::nullopt) {
+                                 std::optional<bool> AllowExtraUses =
+                                     std::nullopt) {
     return checkUses(Op, 2, AllowExtraUses);
   };
 

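checkUses leans on value_or: a per-call override wins when present, otherwise the subtarget default applies. A sketch of just that fallback logic:

    #include <cstdio>
    #include <optional>

    int main() {
      const bool AllowExtraUsesByDefault = true; // stands in for hasBMI2()
      auto checkUses = [&](int NUses, std::optional<bool> AllowExtraUses) {
        // Override if the caller gave one, otherwise use the default.
        return AllowExtraUses.value_or(AllowExtraUsesByDefault) || NUses == 1;
      };
      std::printf("%d\n", checkUses(2, std::nullopt)); // default applies -> 1
      std::printf("%d\n", checkUses(2, false));        // explicit override -> 0
    }
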
@@ -3573,7 +3573,7 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   report_fatal_error("Cannot emit physreg copy instruction");
 }
 
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   if (MI.isMoveReg())
     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};

@@ -3728,7 +3728,7 @@ static unsigned getLoadStoreRegOpcode(Register Reg,
   }
 }
 
-Optional<ExtAddrMode>
+std::optional<ExtAddrMode>
 X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
                                       const TargetRegisterInfo *TRI) const {
   const MCInstrDesc &Desc = MemI.getDesc();

@@ -3757,7 +3757,7 @@ X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
 
 bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
                                      StringRef &ErrInfo) const {
-  Optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr);
+  std::optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr);
   if (!AMOrNone)
     return true;
 

@@ -9041,7 +9041,7 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
 /// If \p DescribedReg overlaps with the MOVrr instruction's destination
 /// register then, if possible, describe the value in terms of the source
 /// register.
-static Optional<ParamLoadedValue>
+static std::optional<ParamLoadedValue>
 describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
                          const TargetRegisterInfo *TRI) {
   Register DestReg = MI.getOperand(0).getReg();

@@ -9074,7 +9074,7 @@ describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
   return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
 }
 
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
   const MachineOperand *Op = nullptr;
   DIExpression *Expr = nullptr;

@@ -327,7 +327,7 @@ public:
                      SmallVectorImpl<MachineOperand> &Cond,
                      bool AllowModify) const override;
 
-  Optional<ExtAddrMode>
+  std::optional<ExtAddrMode>
   getAddrModeFromMemoryOp(const MachineInstr &MemI,
                           const TargetRegisterInfo *TRI) const override;
 

@@ -574,8 +574,8 @@ public:
     return MI.getDesc().TSFlags & X86II::LOCK;
   }
 
-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
 
 protected:
   /// Commutes the operands in the given instruction by changing the operands

@@ -596,7 +596,7 @@ protected:
   /// If the specific machine instruction is a instruction that moves/copies
   /// value from one register to another register return destination and source
   /// registers as machine operands.
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const override;
 
 private:

@@ -65,8 +65,8 @@ class X86MCInstLower {
 public:
   X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);
 
-  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
-                                          const MachineOperand &MO) const;
+  std::optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
+                                               const MachineOperand &MO) const;
   void Lower(const MachineInstr *MI, MCInst &OutMI) const;
 
   MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;

@@ -428,7 +428,7 @@ static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
   return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
 }
 
-Optional<MCOperand>
+std::optional<MCOperand>
 X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
                                     const MachineOperand &MO) const {
   switch (MO.getType()) {

@@ -117,7 +117,7 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
   /// determine if we should insert tilerelease in frame lowering.
   bool HasVirtualTileReg = false;
 
-  Optional<int> SwiftAsyncContextFrameIdx;
+  std::optional<int> SwiftAsyncContextFrameIdx;
 
   // Preallocated fields are only used during isel.
   // FIXME: Can we find somewhere else to store these?

@@ -222,7 +222,7 @@ public:
   bool hasVirtualTileReg() const { return HasVirtualTileReg; }
   void setHasVirtualTileReg(bool v) { HasVirtualTileReg = v; }
 
-  Optional<int> getSwiftAsyncContextFrameIdx() const {
+  std::optional<int> getSwiftAsyncContextFrameIdx() const {
     return SwiftAsyncContextFrameIdx;
   }
   void setSwiftAsyncContextFrameIdx(int v) { SwiftAsyncContextFrameIdx = v; }

@@ -1782,7 +1782,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
 
   // See if we can sink hardening the loaded value.
   auto SinkCheckToSingleUse =
-      [&](MachineInstr &MI) -> Optional<MachineInstr *> {
+      [&](MachineInstr &MI) -> std::optional<MachineInstr *> {
     Register DefReg = MI.getOperand(0).getReg();
 
     // We need to find a single use which we can sink the check. We can

@@ -1854,7 +1854,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
   };
 
   MachineInstr *MI = &InitialMI;
-  while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
+  while (std::optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
     // Update which MI we're checking now.
     MI = *SingleUse;
     if (!MI)

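A subtlety in this last hunk: std::optional<MachineInstr *> can be engaged yet hold a null pointer, which is why the loop still checks if (!MI) after unwrapping. A standalone sketch of the distinction:

    #include <cstdio>
    #include <optional>

    int main() {
      int X = 7;
      std::optional<int *> SingleUse = &X;    // engaged, non-null
      std::optional<int *> Blocked = nullptr; // engaged, but holds nullptr
      std::optional<int *> None;              // disengaged
      std::printf("%d %d %d\n", SingleUse.has_value(), Blocked.has_value(),
                  None.has_value()); // prints: 1 1 0
      if (Blocked && !*Blocked)
        std::puts("an engaged optional can still contain a null pointer");
    }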