From b0df70403d20e8aad5eb1abe147441d8763f333b Mon Sep 17 00:00:00 2001
From: Fangrui Song <i@maskray.me>
Date: Sun, 4 Dec 2022 22:43:14 +0000
Subject: [PATCH] [Target] llvm::Optional => std::optional

The updated functions are mostly internal with a few exceptions (virtual
functions in TargetInstrInfo.h, TargetRegisterInfo.h). To minimize changes
to LLVMCodeGen, GlobalISel files are skipped.

https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
---
 bolt/include/bolt/Core/MCPlus.h                 | 11 ----
 llvm/include/llvm/ADT/Optional.h                | 11 ++++
 llvm/include/llvm/CodeGen/TargetInstrInfo.h     | 16 +++---
 .../include/llvm/CodeGen/TargetRegisterInfo.h   |  2 +-
 .../AsmPrinter/AsmPrinterInlineAsm.cpp          |  2 +-
 llvm/lib/CodeGen/MachineCopyPropagation.cpp     | 52 ++++++++---------
 llvm/lib/CodeGen/ModuloSchedule.cpp             |  4 +-
 llvm/lib/CodeGen/TargetInstrInfo.cpp            |  2 +-
 llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp   |  2 +-
 .../AArch64/AArch64CompressJumpTables.cpp       |  4 +-
 .../Target/AArch64/AArch64FalkorHWPFFix.cpp     | 15 +++---
 .../Target/AArch64/AArch64FrameLowering.cpp     |  4 +-
 .../Target/AArch64/AArch64ISelLowering.cpp      | 35 +++++++------
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp    | 22 ++++----
 llvm/lib/Target/AArch64/AArch64InstrInfo.h      | 16 +++---
 .../AArch64/AArch64LoadStoreOptimizer.cpp       | 15 +++---
 .../Target/AArch64/AArch64MIPeepholeOpt.cpp     | 14 ++---
 .../AArch64/AArch64MachineFunctionInfo.h        | 14 ++---
 .../Target/AArch64/AArch64RegisterInfo.cpp      |  2 +-
 llvm/lib/Target/AArch64/AArch64RegisterInfo.h   |  2 +-
 .../AArch64/AArch64StackTaggingPreRA.cpp        |  6 +--
 .../AArch64/AsmParser/AArch64AsmParser.cpp      | 20 +++----
 .../Target/AArch64/Utils/AArch64BaseInfo.h      |  4 +-
 .../AArch64/Utils/AArch64SMEAttributes.cpp      |  8 +--
 .../AArch64/Utils/AArch64SMEAttributes.h        |  5 +-
 llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp        | 10 ++--
 llvm/lib/Target/ARM/ARMBaseInstrInfo.h          | 10 ++--
 .../Target/ARM/MVEGatherScatterLowering.cpp     | 26 +++++----
 llvm/lib/Target/DirectX/DXILResource.h          |  2 +-
 llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp    |  6 +--
 llvm/lib/Target/M68k/M68kFrameLowering.cpp      |  2 +-
 llvm/lib/Target/M68k/M68kMCInstLower.cpp        |  4 +-
 llvm/lib/Target/M68k/M68kMCInstLower.h          |  4 +-
 llvm/lib/Target/Mips/Mips16InstrInfo.cpp        |  2 +-
 llvm/lib/Target/Mips/Mips16InstrInfo.h          |  3 +-
 llvm/lib/Target/Mips/MipsInstrInfo.cpp          |  6 +--
 llvm/lib/Target/Mips/MipsInstrInfo.h            |  8 +--
 llvm/lib/Target/Mips/MipsSEInstrInfo.cpp        |  2 +-
 llvm/lib/Target/Mips/MipsSEInstrInfo.h          |  2 +-
 llvm/lib/Target/Mips/MipsTargetStreamer.h       |  2 +-
 llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp     | 26 +++++----
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp     |  6 +--
 .../PowerPC/MCTargetDesc/PPCELFStreamer.cpp     |  7 +--
 .../PowerPC/MCTargetDesc/PPCELFStreamer.h       |  4 +-
 llvm/lib/Target/PowerPC/PPCFastISel.cpp         |  7 +--
 llvm/lib/Target/PowerPC/PPCInstrInfo.cpp        |  6 +--
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp     | 48 ++++++++---------
 llvm/lib/Target/RISCV/RISCVISelLowering.h       |  2 +-
 llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp    |  4 +-
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp        |  4 +-
 llvm/lib/Target/RISCV/RISCVInstrInfo.h          |  5 +-
 llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp         | 12 ++---
 llvm/lib/Target/SPIRV/SPIRVBuiltins.h           | 12 ++---
 llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h     |  4 +-
 llvm/lib/Target/VE/VECustomDAG.cpp              |  8 +--
 llvm/lib/Target/VE/VECustomDAG.h                | 14 ++---
 .../AsmParser/WebAssemblyAsmParser.cpp          |  3 +-
 .../AsmParser/WebAssemblyAsmTypeCheck.cpp       |  2 +-
 .../AsmParser/WebAssemblyAsmTypeCheck.h         |  2 +-
 .../Utils/WebAssemblyTypeUtilities.cpp          |  2 +-
 .../Utils/WebAssemblyTypeUtilities.h            |  2 +-
 .../WebAssembly/WebAssemblyFrameLowering.cpp    |  2 +-
 .../WebAssembly/WebAssemblyFrameLowering.h      |  4 +-
 .../WebAssembly/WebAssemblyISelLowering.cpp     |  7 +--
 llvm/lib/Target/X86/X86DynAllocaExpander.cpp    |  2 +-
 llvm/lib/Target/X86/X86FrameLowering.cpp        |  4 +-
 llvm/lib/Target/X86/X86FrameLowering.h          |  4 +-
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp         | 11 ++--
 llvm/lib/Target/X86/X86InstrInfo.cpp            | 10 ++--
 llvm/lib/Target/X86/X86InstrInfo.h              |  8 +--
 llvm/lib/Target/X86/X86MCInstLower.cpp          |  6 +--
 llvm/lib/Target/X86/X86MachineFunctionInfo.h    |  4 +-
 .../X86/X86SpeculativeLoadHardening.cpp         |  4 +-
 73 files changed, 326 insertions(+), 297 deletions(-)

diff --git a/bolt/include/bolt/Core/MCPlus.h b/bolt/include/bolt/Core/MCPlus.h
index e3ddb192821e..b4a72ac274fa 100644
--- a/bolt/include/bolt/Core/MCPlus.h
+++ b/bolt/include/bolt/Core/MCPlus.h
@@ -21,17 +21,6 @@
 #include <vector>

 namespace llvm {
-
-template <typename T, typename = decltype(std::declval<raw_ostream &>()
-                                          << std::declval<const T &>())>
-raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
-  if (O)
-    OS << *O;
-  else
-    OS << std::nullopt;
-  return OS;
-}
-
 namespace bolt {

 // NOTE: using SmallVector for instruction list results in a memory regression.
diff --git a/llvm/include/llvm/ADT/Optional.h b/llvm/include/llvm/ADT/Optional.h
index 2df8d35cf1ae..f666f522f002 100644
--- a/llvm/include/llvm/ADT/Optional.h
+++ b/llvm/include/llvm/ADT/Optional.h
@@ -23,6 +23,7 @@
 #include "llvm/Support/type_traits.h"
 #include <cassert>
 #include <new>
+#include <optional>
 #include <utility>

 namespace llvm {
@@ -490,6 +491,16 @@ raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
   return OS;
 }

+template <typename T, typename = decltype(std::declval<raw_ostream &>()
+                                          << std::declval<const T &>())>
+raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
+  if (O)
+    OS << *O;
+  else
+    OS << std::nullopt;
+  return OS;
+}
+
 } // end namespace llvm

 #endif // LLVM_ADT_OPTIONAL_H
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 8f6e72bbcc43..3aa8c576f01d 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -754,7 +754,7 @@ public:
     ///
     /// Note: This hook is guaranteed to be called from the innermost to the
    /// outermost prologue of the loop being software pipelined.
-    virtual Optional<bool>
+    virtual std::optional<bool>
    createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                    SmallVectorImpl<MachineOperand> &Cond) = 0;

@@ -1009,7 +1009,7 @@ protected:
  /// If the specific machine instruction is a instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
-  virtual Optional<DestSourcePair>
+  virtual std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const {
    return std::nullopt;
  }
@@ -1032,7 +1032,7 @@ public:
  /// For COPY-instruction the method naturally returns destination and source
  /// registers as machine operands, for all other instructions the method calls
  /// target-dependent implementation.
-  Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
+  std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
    if (MI.isCopy()) {
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    }
@@ -1043,8 +1043,8 @@ public:
  /// immediate value and a physical register, and stores the result in
  /// the given physical register \c Reg, return a pair of the source
  /// register and the offset which has been added.
-  virtual Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                              Register Reg) const {
+  virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                                   Register Reg) const {
    return std::nullopt;
  }

@@ -1380,7 +1380,7 @@ public:
  /// MachineInstr that is accessing memory. These values are returned as a
  /// struct ExtAddrMode which contains all relevant information to make up the
  /// address.
-  virtual Optional<ExtAddrMode>
+  virtual std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const {
    return std::nullopt;
@@ -1984,8 +1984,8 @@ public:
  /// Produce the expression describing the \p MI loading a value into
  /// the physical register \p Reg. This hook should only be used with
  /// \p MIs belonging to VReg-less functions.
-  virtual Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                         Register Reg) const;
+  virtual std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const;

  /// Given the generic extension instruction \p ExtMI, returns true if this
  /// extension is a likely candidate for being folded into an another
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 4ad48f79ee42..02e8af9ccd72 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -531,7 +531,7 @@ public:
  /// The absence of an explanation does not mean that the register is not
  /// reserved (meaning, you should check that PhysReg is in fact reserved
  /// before calling this).
-  virtual llvm::Optional<std::string>
+  virtual std::optional<std::string>
  explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
    return {};
  }
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index 617ec13b7974..c1588aaea05e 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -405,7 +405,7 @@ void AsmPrinter::emitInlineAsm(const MachineInstr *MI) const {
        DiagnosticInfoInlineAsm(LocCookie, Note, DiagnosticSeverity::DS_Note));

    for (const Register RR : RestrRegs) {
-      if (llvm::Optional<std::string> reason =
+      if (std::optional<std::string> reason =
              TRI->explainReservedReg(*MF, RR)) {
        MMI->getModule()->getContext().diagnose(DiagnosticInfoInlineAsm(
            LocCookie, *reason, DiagnosticSeverity::DS_Note));
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index b46fc43e3497..871824553aa4 100644
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -88,14 +88,14 @@ static cl::opt<bool> MCPUseCopyInstr("mcp-use-is-copy-instr", cl::init(false),

 namespace {

-static Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI,
-                                            const TargetInstrInfo &TII,
-                                            bool UseCopyInstr) {
+static std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI,
+                                                 const TargetInstrInfo &TII,
+                                                 bool UseCopyInstr) {
  if (UseCopyInstr)
    return TII.isCopyInstr(MI);

  if (MI.isCopy())
-    return Optional<DestSourcePair>(
+    return std::optional<DestSourcePair>(
        DestSourcePair{MI.getOperand(0), MI.getOperand(1)});

  return std::nullopt;
@@ -137,7 +137,7 @@ public:
      auto I = Copies.find(*RUI);
      if (I != Copies.end()) {
        if (MachineInstr *MI = I->second.MI) {
-          Optional<DestSourcePair> CopyOperands =
+          std::optional<DestSourcePair> CopyOperands =
              isCopyInstr(*MI, TII, UseCopyInstr);
          assert(CopyOperands && "Expect copy");

@@ -166,7 +166,7 @@ public:
        // When we clobber the destination of a copy, we need to clobber the
        // whole register it defined.
        if (MachineInstr *MI = I->second.MI) {
-          Optional<DestSourcePair> CopyOperands =
+          std::optional<DestSourcePair> CopyOperands =
              isCopyInstr(*MI, TII, UseCopyInstr);
          markRegsUnavailable({CopyOperands->Destination->getReg().asMCReg()},
                              TRI);
@@ -180,7 +180,8 @@ public:
  /// Add this copy's registers into the tracker's copy maps.
  void trackCopy(MachineInstr *MI, const TargetRegisterInfo &TRI,
                 const TargetInstrInfo &TII, bool UseCopyInstr) {
-    Optional<DestSourcePair> CopyOperands = isCopyInstr(*MI, TII, UseCopyInstr);
+    std::optional<DestSourcePair> CopyOperands =
+        isCopyInstr(*MI, TII, UseCopyInstr);
    assert(CopyOperands && "Tracking non-copy?");

    MCRegister Src = CopyOperands->Source->getReg().asMCReg();
@@ -236,7 +237,7 @@ public:
    if (!AvailCopy)
      return nullptr;

-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*AvailCopy, TII, UseCopyInstr);
    Register AvailSrc = CopyOperands->Source->getReg();
    Register AvailDef = CopyOperands->Destination->getReg();
@@ -266,7 +267,7 @@ public:
    if (!AvailCopy)
      return nullptr;

-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*AvailCopy, TII, UseCopyInstr);
    Register AvailSrc = CopyOperands->Source->getReg();
    Register AvailDef = CopyOperands->Destination->getReg();
@@ -383,7 +384,7 @@ static bool isNopCopy(const MachineInstr &PreviousCopy, MCRegister Src,
                      MCRegister Def, const TargetRegisterInfo *TRI,
                      const TargetInstrInfo *TII, bool UseCopyInstr) {

-  Optional<DestSourcePair> CopyOperands =
+  std::optional<DestSourcePair> CopyOperands =
      isCopyInstr(PreviousCopy, *TII, UseCopyInstr);
  MCRegister PreviousSrc = CopyOperands->Source->getReg().asMCReg();
  MCRegister PreviousDef = CopyOperands->Destination->getReg().asMCReg();
@@ -422,7 +423,8 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy,

  // Copy was redundantly redefining either Src or Def. Remove earlier kill
  // flags between Copy and PrevCopy because the value will be reused now.
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(Copy, *TII, UseCopyInstr);
  assert(CopyOperands);

  Register CopyDef = CopyOperands->Destination->getReg();
@@ -439,8 +441,8 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy,

 bool MachineCopyPropagation::isBackwardPropagatableRegClassCopy(
    const MachineInstr &Copy, const MachineInstr &UseI, unsigned UseIdx) {
-
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(Copy, *TII, UseCopyInstr);
  Register Def = CopyOperands->Destination->getReg();

  if (const TargetRegisterClass *URC =
@@ -458,8 +460,8 @@ bool MachineCopyPropagation::isBackwardPropagatableRegClassCopy(
 bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
                                                       const MachineInstr &UseI,
                                                       unsigned UseIdx) {
-
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(Copy, *TII, UseCopyInstr);
  Register CopySrcReg = CopyOperands->Source->getReg();

  // If the new register meets the opcode register constraints, then allow
@@ -587,7 +589,7 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
    if (!Copy)
      continue;

-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*Copy, *TII, UseCopyInstr);
    Register CopyDstReg = CopyOperands->Destination->getReg();
    const MachineOperand &CopySrc = *CopyOperands->Source;
@@ -654,7 +656,8 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {

  for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
    // Analyze copies (which don't overlap themselves).
-    Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, *TII, UseCopyInstr);
+    std::optional<DestSourcePair> CopyOperands =
+        isCopyInstr(MI, *TII, UseCopyInstr);
    if (CopyOperands) {

      Register RegSrc = CopyOperands->Source->getReg();
@@ -777,7 +780,7 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {
             MaybeDeadCopies.begin();
         DI != MaybeDeadCopies.end();) {
      MachineInstr *MaybeDead = *DI;
-      Optional<DestSourcePair> CopyOperands =
+      std::optional<DestSourcePair> CopyOperands =
          isCopyInstr(*MaybeDead, *TII, UseCopyInstr);
      MCRegister Reg = CopyOperands->Destination->getReg().asMCReg();
      assert(!MRI->isReserved(Reg));
@@ -816,7 +819,7 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {
      LLVM_DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
                 MaybeDead->dump());

-      Optional<DestSourcePair> CopyOperands =
+      std::optional<DestSourcePair> CopyOperands =
          isCopyInstr(*MaybeDead, *TII, UseCopyInstr);
      assert(CopyOperands);

@@ -845,7 +848,8 @@ static bool isBackwardPropagatableCopy(MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       const TargetInstrInfo &TII,
                                       bool UseCopyInstr) {
-  Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, TII, UseCopyInstr);
+  std::optional<DestSourcePair> CopyOperands =
+      isCopyInstr(MI, TII, UseCopyInstr);
  assert(CopyOperands && "MI is expected to be a COPY");

  Register Def = CopyOperands->Destination->getReg();
@@ -887,7 +891,7 @@ void MachineCopyPropagation::propagateDefs(MachineInstr &MI) {
    if (!Copy)
      continue;

-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*Copy, *TII, UseCopyInstr);
    Register Def = CopyOperands->Destination->getReg();
    Register Src = CopyOperands->Source->getReg();
@@ -925,7 +929,8 @@ void MachineCopyPropagation::BackwardCopyPropagateBlock(

  for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
    // Ignore non-trivial COPYs.
-    Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, *TII, UseCopyInstr);
+    std::optional<DestSourcePair> CopyOperands =
+        isCopyInstr(MI, *TII, UseCopyInstr);
    if (CopyOperands && MI.getNumOperands() == 2) {
      Register DefReg = CopyOperands->Destination->getReg();
      Register SrcReg = CopyOperands->Source->getReg();
@@ -986,8 +991,7 @@ void MachineCopyPropagation::BackwardCopyPropagateBlock(
  }

  for (auto *Copy : MaybeDeadCopies) {
-
-    Optional<DestSourcePair> CopyOperands =
+    std::optional<DestSourcePair> CopyOperands =
        isCopyInstr(*Copy, *TII, UseCopyInstr);
    Register Src = CopyOperands->Source->getReg();
    Register Def = CopyOperands->Destination->getReg();
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index c7fde45eba6a..195fd45c223c 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -880,7 +880,7 @@ void ModuloScheduleExpander::addBranches(MachineBasicBlock &PreheaderBB,
    MachineBasicBlock *Epilog = EpilogBBs[i];

    SmallVector<MachineOperand, 4> Cond;
-    Optional<bool> StaticallyGreater =
+    std::optional<bool> StaticallyGreater =
        LoopInfo->createTripCountGreaterCondition(j + 1, *Prolog, Cond);
    unsigned numAdded = 0;
    if (!StaticallyGreater) {
@@ -1963,7 +1963,7 @@ void PeelingModuloScheduleExpander::fixupBranches() {
    MachineBasicBlock *Epilog = *EI;
    SmallVector<MachineOperand, 4> Cond;
    TII->removeBranch(*Prolog);
-    Optional<bool> StaticallyGreater =
+    std::optional<bool> StaticallyGreater =
        LoopInfo->createTripCountGreaterCondition(TC, *Prolog, Cond);
    if (!StaticallyGreater) {
      LLVM_DEBUG(dbgs() << "Dynamic: TC > " << TC << "\n");
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index ff624dac93ae..d15369f09e48 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1171,7 +1171,7 @@ bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
  return (DefCycle != -1 && DefCycle <= 1);
 }

-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 75a70c3cd2d7..1aee0a851f1b 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -255,7 +255,7 @@ void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
 void AArch64AsmPrinter::emitFunctionHeaderComment() {
  const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
-  Optional<std::string> OutlinerString = FI->getOutliningStyle();
+  std::optional<std::string> OutlinerString = FI->getOutliningStyle();
  if (OutlinerString != std::nullopt)
    OutStreamer->getCommentOS() << ' ' << OutlinerString;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
index ac8d753ac4e1..05919c8faf95 100644
--- a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
@@ -39,7 +39,7 @@ class AArch64CompressJumpTables : public MachineFunctionPass {

  /// Returns the size in instructions of the block \p MBB, or None if we
  /// couldn't get a safe upper bound.
-  Optional<int> computeBlockSize(MachineBasicBlock &MBB);
+  std::optional<int> computeBlockSize(MachineBasicBlock &MBB);

  /// Gather information about the function, returns false if we can't perform
  /// this optimization for some reason.
@@ -69,7 +69,7 @@ char AArch64CompressJumpTables::ID = 0;
 INITIALIZE_PASS(AArch64CompressJumpTables, DEBUG_TYPE,
                "AArch64 compress jump tables pass", false, false)

-Optional<int>
+std::optional<int>
 AArch64CompressJumpTables::computeBlockSize(MachineBasicBlock &MBB) {
  int Size = 0;
  for (const MachineInstr &MI : MBB) {
diff --git a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
index 418b201437bf..c11a0d647734 100644
--- a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
@@ -234,7 +234,7 @@ static unsigned makeTag(unsigned Dest, unsigned Base, unsigned Offset) {
  return (Dest & 0xf) | ((Base & 0xf) << 4) | ((Offset & 0x3f) << 8);
 }

-static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
+static std::optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
  int DestRegIdx;
  int BaseRegIdx;
  int OffsetIdx;
@@ -656,8 +656,9 @@ static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
  return LI;
 }

-static Optional<unsigned> getTag(const TargetRegisterInfo *TRI,
-                                 const MachineInstr &MI, const LoadInfo &LI) {
+static std::optional<unsigned> getTag(const TargetRegisterInfo *TRI,
+                                      const MachineInstr &MI,
+                                      const LoadInfo &LI) {
  unsigned Dest = LI.DestReg ? TRI->getEncodingValue(LI.DestReg) : 0;
  unsigned Base = TRI->getEncodingValue(LI.BaseReg);
  unsigned Off;
@@ -679,10 +680,10 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
  TagMap.clear();
  for (MachineBasicBlock *MBB : L.getBlocks())
    for (MachineInstr &MI : *MBB) {
-      Optional<LoadInfo> LInfo = getLoadInfo(MI);
+      std::optional<LoadInfo> LInfo = getLoadInfo(MI);
      if (!LInfo)
        continue;
-      Optional<unsigned> Tag = getTag(TRI, MI, *LInfo);
+      std::optional<unsigned> Tag = getTag(TRI, MI, *LInfo);
      if (!Tag)
        continue;
      TagMap[*Tag].push_back(&MI);
@@ -719,11 +720,11 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
      if (!TII->isStridedAccess(MI))
        continue;

-      Optional<LoadInfo> OptLdI = getLoadInfo(MI);
+      std::optional<LoadInfo> OptLdI = getLoadInfo(MI);
      if (!OptLdI)
        continue;
      LoadInfo LdI = *OptLdI;
-      Optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
+      std::optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
      if (!OptOldTag)
        continue;
      auto &OldCollisions = TagMap[*OptOldTag];
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 4d2faaa97d89..572445ab76c1 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1475,7 +1475,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,

  // Set tagged base pointer to the requested stack slot.
  // Ideally it should match SP value after prologue.
-  Optional<int> TBPI = AFI->getTaggedBasePointerIndex();
+  std::optional<int> TBPI = AFI->getTaggedBasePointerIndex();
  if (TBPI)
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
@@ -3933,7 +3933,7 @@ void AArch64FrameLowering::orderFrameObjects(
  // and save one instruction when generating the base pointer because IRG does
  // not allow an immediate offset.
  const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
-  Optional<int> TBPI = AFI.getTaggedBasePointerIndex();
+  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 89a7d72a5cac..9a9cbcf31130 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4333,7 +4333,8 @@ static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,

 // Returns lane if Op extracts from a two-element vector and lane is constant
 // (i.e., extractelt(<2 x Ty> %v, ConstantLane)), and None otherwise.
-static Optional<uint64_t> getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
+static std::optional<uint64_t>
+getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
  SDNode *OpNode = Op.getNode();
  if (OpNode->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return std::nullopt;
@@ -4656,7 +4657,7 @@ SDValue AArch64TargetLowering::getPStateSM(SelectionDAG &DAG, SDValue Chain,
                     Mask);
 }

-static Optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
+static std::optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
  if (auto *ES = dyn_cast<ExternalSymbolSDNode>(V)) {
    StringRef S(ES->getSymbol());
    if (S == "__arm_sme_state" || S == "__arm_tpidr2_save")
@@ -4739,8 +4740,10 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);

-    Optional<uint64_t> LHSLane = getConstantLaneNumOfExtractHalfOperand(LHS);
-    Optional<uint64_t> RHSLane = getConstantLaneNumOfExtractHalfOperand(RHS);
+    std::optional<uint64_t> LHSLane =
+        getConstantLaneNumOfExtractHalfOperand(LHS);
+    std::optional<uint64_t> RHSLane =
+        getConstantLaneNumOfExtractHalfOperand(RHS);

    assert((!LHSLane || *LHSLane < 2) && "Expect lane to be None or 0 or 1");
    assert((!RHSLane || *RHSLane < 2) && "Expect lane to be None or 0 or 1");
@@ -4749,9 +4752,10 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    // instructions execute on SIMD registers. So canonicalize i64 to v1i64,
    // which ISel recognizes better. For example, generate a ldr into d*
    // registers as opposed to a GPR load followed by a fmov.
-    auto TryVectorizeOperand =
-        [](SDValue N, Optional<uint64_t> NLane, Optional<uint64_t> OtherLane,
-           const SDLoc &dl, SelectionDAG &DAG) -> SDValue {
+    auto TryVectorizeOperand = [](SDValue N, std::optional<uint64_t> NLane,
+                                  std::optional<uint64_t> OtherLane,
+                                  const SDLoc &dl,
+                                  SelectionDAG &DAG) -> SDValue {
      // If the operand is an higher half itself, rewrite it to
      // extract_high_v2i64; this way aarch64_neon_pmull64 could
      // re-use the dag-combiner function with aarch64_neon_{pmull,smull,umull}.
@@ -4822,7 +4826,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    unsigned ElementSize = 128 / Op.getValueType().getVectorMinNumElements();
    unsigned NumActiveElems =
        Op.getConstantOperandVal(2) - Op.getConstantOperandVal(1);
-    Optional<unsigned> PredPattern =
+    std::optional<unsigned> PredPattern =
        getSVEPredPatternFromNumElements(NumActiveElems);
    if ((PredPattern != std::nullopt) &&
        NumActiveElems <= (MinSVEVectorSize / ElementSize))
@@ -7055,7 +7059,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
  SMEAttrs CalleeAttrs, CallerAttrs(MF.getFunction());
  if (CLI.CB)
    CalleeAttrs = SMEAttrs(*CLI.CB);
-  else if (Optional<SMEAttrs> Attrs =
+  else if (std::optional<SMEAttrs> Attrs =
              getCalleeAttrsFromExternalFunction(CLI.Callee))
    CalleeAttrs = *Attrs;

@@ -7084,7 +7088,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
  }

  SDValue PStateSM;
-  Optional<bool> RequiresSMChange = CallerAttrs.requiresSMChange(CalleeAttrs);
+  std::optional<bool> RequiresSMChange =
+      CallerAttrs.requiresSMChange(CalleeAttrs);
  if (RequiresSMChange)
    PStateSM = getPStateSM(DAG, Chain, CallerAttrs, DL, MVT::i64);

@@ -9011,7 +9016,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
  // splice predicate. However, we can only do this if we can guarantee that
  // there are enough elements in the vector, hence we check the index <= min
  // number of elements.
-  Optional<unsigned> PredPattern;
+  std::optional<unsigned> PredPattern;
  if (Ty.isScalableVector() && IdxVal < 0 &&
      (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
          std::nullopt) {
@@ -12561,7 +12566,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
    if (Vec0.isUndef())
      return Op;

-    Optional<unsigned> PredPattern =
+    std::optional<unsigned> PredPattern =
        getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
    auto PredTy = VT.changeVectorElementType(MVT::i1);
    SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
@@ -14112,7 +14117,7 @@ bool AArch64TargetLowering::lowerInterleavedLoad(

  Value *PTrue = nullptr;
  if (UseScalable) {
-    Optional<unsigned> PgPattern =
+    std::optional<unsigned> PgPattern =
        getSVEPredPatternFromNumElements(FVTy->getNumElements());
    if (Subtarget->getMinSVEVectorSizeInBits() ==
            Subtarget->getMaxSVEVectorSizeInBits() &&
@@ -14304,7 +14309,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,

  Value *PTrue = nullptr;
  if (UseScalable) {
-    Optional<unsigned> PgPattern =
+    std::optional<unsigned> PgPattern =
        getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
    if (Subtarget->getMinSVEVectorSizeInBits() ==
            Subtarget->getMaxSVEVectorSizeInBits() &&
@@ -22274,7 +22279,7 @@ static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
         DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal fixed length vector!");

-  Optional<unsigned> PgPattern =
+  std::optional<unsigned> PgPattern =
      getSVEPredPatternFromNumElements(VT.getVectorNumElements());
  assert(PgPattern && "Unexpected element count for SVE predicate");

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 9752019b47aa..353692c5d003 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1646,7 +1646,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
 /// \returns None otherwise.
 ///
 /// Collect instructions using that flags in \p CCUseInstrs if provided.
-Optional<UsedNZCV>
+std::optional<UsedNZCV>
 llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                       const TargetRegisterInfo &TRI,
                       SmallVectorImpl<MachineInstr *> *CCUseInstrs) {
@@ -1701,7 +1701,7 @@ static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

-  Optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
+  std::optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
  if (!NZVCUsed || NZVCUsed->C || NZVCUsed->V)
    return false;

@@ -1787,7 +1787,7 @@ static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
  if (MIUsedNZCV.C || MIUsedNZCV.V)
    return false;

-  Optional<UsedNZCV> NZCVUsedAfterCmp =
+  std::optional<UsedNZCV> NZCVUsedAfterCmp =
      examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs);
  // Condition flags are not used in CmpInstr basic block successors and only
  // Z or N flags allowed to be used after CmpInstr within its basic block
@@ -2214,7 +2214,7 @@ bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) {
  }
 }

-Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
+std::optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default: return {};
  case AArch64::PRFMui: return AArch64::PRFUMi;
@@ -2594,7 +2594,7 @@ bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
  return true;
 }

-Optional<ExtAddrMode>
+std::optional<ExtAddrMode>
 AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
                                          const TargetRegisterInfo *TRI) const {
  const MachineOperand *Base; // Filled with the base operand of MI.
@@ -4699,7 +4699,7 @@ int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset and there is an unscaled op to use.
-  Optional<unsigned> UnscaledOp =
+  std::optional<unsigned> UnscaledOp =
      AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
  bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
  if (useUnscaledOp &&
@@ -8080,7 +8080,7 @@ bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
  return MF.getFunction().hasMinSize();
 }

-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {

  // AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR reg
@@ -8100,8 +8100,8 @@ AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  return std::nullopt;
 }

-Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
-                                                      Register Reg) const {
+std::optional<RegImmPair>
+AArch64InstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
  int Sign = 1;
  int64_t Offset = 0;

@@ -8139,7 +8139,7 @@ Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
 /// If the given ORR instruction is a copy, and \p DescribedReg overlaps with
 /// the destination register then, if possible, describe the value in terms of
 /// the source register.
-static Optional<ParamLoadedValue>
+static std::optional<ParamLoadedValue>
 describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
                       const TargetInstrInfo *TII,
                       const TargetRegisterInfo *TRI) {
@@ -8174,7 +8174,7 @@ describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
  return std::nullopt;
 }

-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  const MachineFunction *MF = MI.getMF();
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 96e16b0d1ee9..65792f324449 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -85,7 +85,7 @@ public:

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
-  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);
+  static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
@@ -133,7 +133,7 @@ public:
  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

-  Optional<ExtAddrMode>
+  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

@@ -314,11 +314,11 @@ public:
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

-  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                      Register Reg) const override;
+  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                           Register Reg) const override;

-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

@@ -339,7 +339,7 @@ protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

 private:
@@ -392,7 +392,7 @@ struct UsedNZCV {
 /// \returns None otherwise.
 ///
 /// Collect instructions using that flags in \p CCUseInstrs if provided.
-Optional<UsedNZCV>
+std::optional<UsedNZCV>
 examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 2aaab7652721..1859195de86e 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -99,7 +99,7 @@ using LdStPairFlags = struct LdStPairFlags {
  // If not none, RenameReg can be used to rename the result register of the
  // first store in a pair. Currently this only works when merging stores
  // forward.
-  Optional<MCPhysReg> RenameReg = std::nullopt;
+  std::optional<MCPhysReg> RenameReg = std::nullopt;

  LdStPairFlags() = default;

@@ -111,7 +111,7 @@ using LdStPairFlags = struct LdStPairFlags {
  void setRenameReg(MCPhysReg R) { RenameReg = R; }
  void clearRenameReg() { RenameReg = std::nullopt; }
-  Optional<MCPhysReg> getRenameReg() const { return RenameReg; }
+  std::optional<MCPhysReg> getRenameReg() const { return RenameReg; }
 };

 struct AArch64LoadStoreOpt : public MachineFunctionPass {
@@ -846,7 +846,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,

  bool MergeForward = Flags.getMergeForward();

-  Optional<MCPhysReg> RenameReg = Flags.getRenameReg();
+  std::optional<MCPhysReg> RenameReg = Flags.getRenameReg();
  if (MergeForward && RenameReg) {
    MCRegister RegToRename = getLdStRegOp(*I).getReg();
    DefinedInBB.addReg(*RenameReg);
@@ -1470,7 +1470,7 @@ canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
 // * not used in \p UsedInBetween; UsedInBetween must contain all accessed
 //   registers in the range the rename register will be used,
 // * is available in all used register classes (checked using RequiredClasses).
-static Optional<MCPhysReg> tryToFindRegisterToRename(
+static std::optional<MCPhysReg> tryToFindRegisterToRename(
    const MachineFunction &MF, Register Reg, LiveRegUnits &DefinedInBB,
    LiveRegUnits &UsedInBetween,
    SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
@@ -1719,9 +1719,10 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                          RequiredClasses, TRI)};

        if (*MaybeCanRename) {
-          Optional<MCPhysReg> MaybeRenameReg = tryToFindRegisterToRename(
-              *FirstMI.getParent()->getParent(), Reg, DefinedInBB,
-              UsedInBetween, RequiredClasses, TRI);
+          std::optional<MCPhysReg> MaybeRenameReg =
+              tryToFindRegisterToRename(*FirstMI.getParent()->getParent(),
+                                        Reg, DefinedInBB, UsedInBetween,
+                                        RequiredClasses, TRI);
          if (MaybeRenameReg) {
            Flags.setRenameReg(*MaybeRenameReg);
            Flags.setMergeForward(true);
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index d377b7323474..17a8da83b1fa 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -65,7 +65,7 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
  using OpcodePair = std::pair<unsigned, unsigned>;
  template <typename T>
  using SplitAndOpcFunc =
-      std::function<Optional<OpcodePair>(T, unsigned, T &, T &)>;
+      std::function<std::optional<OpcodePair>(T, unsigned, T &, T &)>;
  using BuildMIFunc =
      std::function<void(MachineInstr &, OpcodePair, unsigned, unsigned,
                         Register, Register, Register)>;
@@ -173,7 +173,8 @@ bool AArch64MIPeepholeOpt::visitAND(
  return splitTwoPartImm<T>(
      MI,
-      [Opc](T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+      [Opc](T Imm, unsigned RegSize, T &Imm0,
+            T &Imm1) -> std::optional<OpcodePair> {
        if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
          return std::make_pair(Opc, Opc);
        return std::nullopt;
      },
@@ -337,7 +338,7 @@ bool AArch64MIPeepholeOpt::visitADDSUB(
  return splitTwoPartImm<T>(
      MI,
      [PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
-                       T &Imm1) -> Optional<OpcodePair> {
+                       T &Imm1) -> std::optional<OpcodePair> {
        if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
          return std::make_pair(PosOpc, PosOpc);
        if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
@@ -367,8 +368,9 @@ bool AArch64MIPeepholeOpt::visitADDSSUBS(
  // that the condition code usages are only for Equal and Not Equal
  return splitTwoPartImm<T>(
      MI,
-      [PosOpcs, NegOpcs, &MI, &TRI = TRI, &MRI = MRI](
-          T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+      [PosOpcs, NegOpcs, &MI, &TRI = TRI,
+       &MRI = MRI](T Imm, unsigned RegSize, T &Imm0,
+                   T &Imm1) -> std::optional<OpcodePair> {
        OpcodePair OP;
        if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
          OP = PosOpcs;
@@ -379,7 +381,7 @@ bool AArch64MIPeepholeOpt::visitADDSSUBS(
        // Check conditional uses last since it is expensive for scanning
        // proceeding instructions
        MachineInstr &SrcMI = *MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
-        Optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
+        std::optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
        if (!NZCVUsed || NZCVUsed->C || NZCVUsed->V)
          return std::nullopt;
        return OP;
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index fc2015cf2709..7fc6480300b6 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -140,7 +140,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
  SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;

  /// FrameIndex for the tagged base pointer.
-  Optional<int> TaggedBasePointerIndex;
+  std::optional<int> TaggedBasePointerIndex;

  /// Offset from SP-at-entry to the tagged base pointer.
  /// Tagged base pointer is set up to point to the first (lowest address)
@@ -149,7 +149,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {

  /// OutliningStyle denotes, if a function was outined, how it was outlined,
  /// e.g. Tail Call, Thunk, or Function if none apply.
-  Optional<std::string> OutliningStyle;
+  std::optional<std::string> OutliningStyle;

  // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
  // CalleeSavedStackSize) to the address of the frame record.
@@ -189,10 +189,10 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {

  /// True if the function need unwind information.
-  mutable Optional<bool> NeedsDwarfUnwindInfo;
+  mutable std::optional<bool> NeedsDwarfUnwindInfo;

  /// True if the function need asynchronous unwind information.
-  mutable Optional<bool> NeedsAsyncDwarfUnwindInfo;
+  mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;

 public:
  explicit AArch64FunctionInfo(MachineFunction &MF);
@@ -251,7 +251,9 @@ public:
  uint64_t getLocalStackSize() const { return LocalStackSize; }

  void setOutliningStyle(std::string Style) { OutliningStyle = Style; }
-  Optional<std::string> getOutliningStyle() const { return OutliningStyle; }
+  std::optional<std::string> getOutliningStyle() const {
+    return OutliningStyle;
+  }

  void setCalleeSavedStackSize(unsigned Size) {
    CalleeSavedStackSize = Size;
@@ -407,7 +409,7 @@ public:
    return ForwardedMustTailRegParms;
  }

-  Optional<int> getTaggedBasePointerIndex() const {
+  std::optional<int> getTaggedBasePointerIndex() const {
    return TaggedBasePointerIndex;
  }
  void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index f6d53d3e33ab..299892ad4ede 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -354,7 +354,7 @@ const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
 }

-llvm::Optional<std::string>
+std::optional<std::string>
 AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.h b/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
index 4ffd4142f74b..e93352c13b3c 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -94,7 +94,7 @@ public:
  BitVector getStrictlyReservedRegs(const MachineFunction &MF) const;
  BitVector getReservedRegs(const MachineFunction &MF) const override;

-  llvm::Optional<std::string>
+  std::optional<std::string>
  explainReservedReg(const MachineFunction &MF,
                     MCRegister PhysReg) const override;
  bool isAsmClobberable(const MachineFunction &MF,
diff --git a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
index 1eebbe0d62f2..b7700baf79fc 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
@@ -74,7 +74,7 @@ public:
  bool mayUseUncheckedLoadStore();
  void uncheckUsesOf(unsigned TaggedReg, int FI);
  void uncheckLoadsAndStores();
-  Optional<int> findFirstSlotCandidate();
+  std::optional<int> findFirstSlotCandidate();

  bool runOnMachineFunction(MachineFunction &Func) override;
  StringRef getPassName() const override {
@@ -238,7 +238,7 @@ static bool isSlotPreAllocated(MachineFrameInfo *MFI, int FI) {
 // eliminates a vreg (by replacing it with direct uses of IRG, which is usually
 // live almost everywhere anyway), and therefore needs to happen before
 // regalloc.
-Optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
+std::optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
  // Find the best (FI, Tag) pair to pin to offset 0.
  // Looking at the possible uses of a tagged address, the advantage of pinning
  // is:
@@ -378,7 +378,7 @@ bool AArch64StackTaggingPreRA::runOnMachineFunction(MachineFunction &Func) {
  // Find a slot that is used with zero tag offset, like ADDG #fi, 0.
  // If the base tagged pointer is set up to the address of this slot,
  // the ADDG instruction can be eliminated.
-  Optional<int> BaseSlot = findFirstSlotCandidate();
+  std::optional<int> BaseSlot = findFirstSlotCandidate();
  if (BaseSlot)
    AFI->setTaggedBasePointerIndex(*BaseSlot);

diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index fe1cd530637e..1c08c778f153 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2581,8 +2581,8 @@ static unsigned MatchNeonVectorRegName(StringRef Name) {
 /// is a valid vector kind. Where the number of elements in a vector
 /// or the vector width is implicit or explicitly unknown (but still a
 /// valid suffix kind), 0 is used.
-static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
-                                                     RegKind VectorKind) {
+static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
+                                                          RegKind VectorKind) {
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
@@ -2633,7 +2633,7 @@ static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

-  return Optional<std::pair<int, int>>(Res);
+  return std::optional<std::pair<int, int>>(Res);
 }

 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
@@ -3026,19 +3026,19 @@ AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
-        return Optional<unsigned>(Res->Encoding);
+        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
-      return Optional<unsigned>(Res->Encoding);
-    return Optional<unsigned>();
+      return std::optional<unsigned>(Res->Encoding);
+    return std::optional<unsigned>();
  };
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
-        return Optional<StringRef>(Res->Name);
+        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
-      return Optional<StringRef>(Res->Name);
-    return Optional<StringRef>();
+      return std::optional<StringRef>(Res->Name);
+    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
@@ -4326,7 +4326,7 @@ AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
    return MatchOperand_NoMatch;

  StringRef Tail = Name.drop_front(DotPosition);
-  const Optional<std::pair<int, int>> &KindRes =
+  const std::optional<std::pair<int, int>> &KindRes =
      parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
diff --git a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index 2be5c62653e5..18717b0c0aa8 100644
--- a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -536,7 +536,7 @@ inline unsigned getNumElementsFromSVEPredPattern(unsigned Pattern) {
 }

 /// Return specific VL predicate pattern based on the number of elements.
-inline Optional<unsigned>
+inline std::optional<unsigned>
 getSVEPredPatternFromNumElements(unsigned MinNumElts) {
  switch (MinNumElts) {
  default:
@@ -839,7 +839,7 @@ inline static StringRef AArch64PACKeyIDToString(AArch64PACKey::ID KeyID) {
 }

 /// Return numeric key ID for 2-letter identifier string.
-inline static Optional<AArch64PACKey::ID>
+inline static std::optional<AArch64PACKey::ID>
 AArch64StringToPACKeyID(StringRef Name) {
  if (Name == "ia")
    return AArch64PACKey::IA;
diff --git a/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp b/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp
index a3a9da8b8a8a..6288d0b690c7 100644
--- a/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp
+++ b/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp
@@ -49,13 +49,15 @@ SMEAttrs::SMEAttrs(const AttributeList &Attrs) {
    Bitmask |= ZA_Preserved;
 }

-Optional<bool> SMEAttrs::requiresSMChange(const SMEAttrs &Callee,
-                                          bool BodyOverridesInterface) const {
+std::optional<bool>
+SMEAttrs::requiresSMChange(const SMEAttrs &Callee,
+                           bool BodyOverridesInterface) const {
  // If the transition is not through a call (e.g. when considering inlining)
  // and Callee has a streaming body, then we can ignore the interface of
  // Callee.
  if (BodyOverridesInterface && Callee.hasStreamingBody()) {
-    return hasStreamingInterfaceOrBody() ? std::nullopt : Optional<bool>(true);
+    return hasStreamingInterfaceOrBody() ? std::nullopt
+                                         : std::optional<bool>(true);
  }

  if (Callee.hasStreamingCompatibleInterface())
diff --git a/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.h b/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.h
index 03d9187fae9a..0983d09166d5 100644
--- a/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.h
+++ b/llvm/lib/Target/AArch64/Utils/AArch64SMEAttributes.h
@@ -68,8 +68,9 @@ public:
  /// interface. This can be useful when considering e.g. inlining, where we
  /// explicitly want the body to overrule the interface (because after inlining
  /// the interface is no longer relevant).
-  Optional<bool> requiresSMChange(const SMEAttrs &Callee,
-                                  bool BodyOverridesInterface = false) const;
+  std::optional<bool>
+  requiresSMChange(const SMEAttrs &Callee,
+                   bool BodyOverridesInterface = false) const;

  // Interfaces to query PSTATE.ZA
  bool hasNewZAInterface() const { return Bitmask & ZA_New; }
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index e87825e50ad0..b94630295103 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1053,7 +1053,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
    Mov->addRegisterKilled(SrcReg, TRI);
 }

-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // VMOVRRD is also a copy instruction but it requires
  // special way of handling. It is more complex copy version
@@ -1069,7 +1069,7 @@ ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
 }

-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  if (auto DstSrcPair = isCopyInstrImpl(MI)) {
@@ -5555,8 +5555,8 @@ ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  return makeArrayRef(TargetFlags);
 }

-Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
-                                                      Register Reg) const {
+std::optional<RegImmPair>
+ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
  int Sign = 1;
  unsigned Opcode = MI.getOpcode();
  int64_t Offset = 0;
@@ -6815,7 +6815,7 @@ public:
    return true;
  }

-  Optional<bool> createTripCountGreaterCondition(
+  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &Cond) override {

diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 453e3fa1b99b..1b6e935c77ea 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -104,13 +104,13 @@ protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
-  Optional<DestSourcePair>
+  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

  /// Specialization of \ref TargetInstrInfo::describeLoadedValue, used to
  /// enhance debug entry value descriptions for ARM targets.
-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

 public:
  // Return whether the target has an explicit NOP encoding.
@@ -531,8 +531,8 @@ public:
    return MI.getOperand(3).getReg();
  }

-  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                      Register Reg) const override;
+  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                           Register Reg) const override;
 };

 /// Get the operands corresponding to the given \p Pred value. By default, the
diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
index 296801094fbe..57692de3b447 100644
--- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -98,7 +98,7 @@ private:
  int computeScale(unsigned GEPElemSize, unsigned MemoryElemSize);
  // If the value is a constant, or derived from constants via additions
  // and multilications, return its numeric value
-  Optional<int64_t> getIfConst(const Value *V);
+  std::optional<int64_t> getIfConst(const Value *V);
  // If Inst is an add instruction, check whether one summand is a
  // constant. If so, scale this constant and return it together with
  // the other summand.
@@ -335,31 +335,31 @@ int MVEGatherScatterLowering::computeScale(unsigned GEPElemSize,
  return -1;
 }

-Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
+std::optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
  const Constant *C = dyn_cast<Constant>(V);
  if (C && C->getSplatValue())
-    return Optional<int64_t>{C->getUniqueInteger().getSExtValue()};
+    return std::optional<int64_t>{C->getUniqueInteger().getSExtValue()};
  if (!isa<Instruction>(V))
-    return Optional<int64_t>{};
+    return std::optional<int64_t>{};

  const Instruction *I = cast<Instruction>(V);
  if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Or ||
      I->getOpcode() == Instruction::Mul ||
      I->getOpcode() == Instruction::Shl) {
-    Optional<int64_t> Op0 = getIfConst(I->getOperand(0));
-    Optional<int64_t> Op1 = getIfConst(I->getOperand(1));
+    std::optional<int64_t> Op0 = getIfConst(I->getOperand(0));
+    std::optional<int64_t> Op1 = getIfConst(I->getOperand(1));
    if (!Op0 || !Op1)
-      return Optional<int64_t>{};
+      return std::optional<int64_t>{};
    if (I->getOpcode() == Instruction::Add)
-      return Optional<int64_t>{Op0.value() + Op1.value()};
+      return std::optional<int64_t>{Op0.value() + Op1.value()};
    if (I->getOpcode() == Instruction::Mul)
-      return Optional<int64_t>{Op0.value() * Op1.value()};
+      return std::optional<int64_t>{Op0.value() * Op1.value()};
    if (I->getOpcode() == Instruction::Shl)
-      return Optional<int64_t>{Op0.value() << Op1.value()};
+      return std::optional<int64_t>{Op0.value() << Op1.value()};
    if (I->getOpcode() == Instruction::Or)
-      return Optional<int64_t>{Op0.value() | Op1.value()};
+      return std::optional<int64_t>{Op0.value() | Op1.value()};
  }
-  return Optional<int64_t>{};
+  return std::optional<int64_t>{};
 }

 // Return true if I is an Or instruction that is equivalent to an add, due to
@@ -381,7 +381,7 @@ MVEGatherScatterLowering::getVarAndConst(Value *Inst, int TypeScale) {
    return ReturnFalse;

  Value *Summand;
-  Optional<int64_t> Const;
+  std::optional<int64_t> Const;
  // Find out which operand the value that is increased is
  if ((Const = getIfConst(Add->getOperand(0))))
    Summand = Add->getOperand(1);
diff --git a/llvm/lib/Target/DirectX/DXILResource.h b/llvm/lib/Target/DirectX/DXILResource.h
index f570c3cdf68e..1936717b62ac 100644
--- a/llvm/lib/Target/DirectX/DXILResource.h
+++ b/llvm/lib/Target/DirectX/DXILResource.h
@@ -77,7 +77,7 @@ protected:

 public:
  struct ExtendedProperties {
-    llvm::Optional<ComponentType> ElementType;
+    std::optional<ComponentType> ElementType;

    // The value ordering of this enumeration is part of the DXIL ABI. Elements
    // can only be added to the end, and not removed.
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 72b8bb877821..09390b901b35 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -751,9 +751,9 @@ public:
    return MI == EndLoop;
  }

-  Optional<bool>
-  createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
-                                  SmallVectorImpl<MachineOperand> &Cond) override {
+  std::optional<bool> createTripCountGreaterCondition(
+      int TC, MachineBasicBlock &MBB,
+      SmallVectorImpl<MachineOperand> &Cond) override {
    if (TripCount == -1) {
      // Check if we're done with the loop.
      Register Done = TII->createVR(MF, MVT::i1);
diff --git a/llvm/lib/Target/M68k/M68kFrameLowering.cpp b/llvm/lib/Target/M68k/M68kFrameLowering.cpp
index 5ecda433512c..6347a52e1918 100644
--- a/llvm/lib/Target/M68k/M68kFrameLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kFrameLowering.cpp
@@ -673,7 +673,7 @@ void M68kFrameLowering::emitEpilogue(MachineFunction &MF,
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
-  Optional<unsigned> RetOpcode;
+  std::optional<unsigned> RetOpcode;
  if (MBBI != MBB.end())
    RetOpcode = MBBI->getOpcode();
  DebugLoc DL;
diff --git a/llvm/lib/Target/M68k/M68kMCInstLower.cpp b/llvm/lib/Target/M68k/M68kMCInstLower.cpp
index 93a60ab739b8..b3388df93748 100644
--- a/llvm/lib/Target/M68k/M68kMCInstLower.cpp
+++ b/llvm/lib/Target/M68k/M68kMCInstLower.cpp
@@ -110,7 +110,7 @@ MCOperand M68kMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
  return MCOperand::createExpr(Expr);
 }

-Optional<MCOperand>
+std::optional<MCOperand>
 M68kMCInstLower::LowerOperand(const MachineInstr *MI,
                              const MachineOperand &MO) const {
  switch (MO.getType()) {
@@ -148,7 +148,7 @@ void M68kMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
-    Optional<MCOperand> MCOp = LowerOperand(MI, MO);
+    std::optional<MCOperand> MCOp = LowerOperand(MI, MO);

    if (MCOp.has_value() && MCOp.value().isValid())
      OutMI.addOperand(MCOp.value());
diff --git a/llvm/lib/Target/M68k/M68kMCInstLower.h b/llvm/lib/Target/M68k/M68kMCInstLower.h
index 76d9a36f70ef..2d19a8d6732d 100644
--- a/llvm/lib/Target/M68k/M68kMCInstLower.h
+++ b/llvm/lib/Target/M68k/M68kMCInstLower.h
@@ -44,8 +44,8 @@ public:

  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

-  Optional<MCOperand> LowerOperand(const MachineInstr *MI,
-                                   const MachineOperand &MO) const;
+  std::optional<MCOperand> LowerOperand(const MachineInstr *MI,
+                                        const MachineOperand &MO) const;

  void Lower(const MachineInstr *MI, MCInst &OutMI) const;
 };
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 222d7acce322..8bc319752e7d 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -96,7 +96,7 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
 }

-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 Mips16InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.h b/llvm/lib/Target/Mips/Mips16InstrInfo.h
index 294afd6460f6..e57d21b48a17 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.h
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -106,7 +106,8 @@ protected:
  /// If the specific machine instruction is a instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
-  Optional<DestSourcePair> isCopyInstrImpl(const MachineInstr &MI) const override;
+  std::optional<DestSourcePair>
+  isCopyInstrImpl(const MachineInstr &MI) const override;

 private:
  unsigned getAnalyzableBrOpc(unsigned Opc) const override;
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index a7b1bcf9c20f..85e78c9d7268 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -933,7 +933,7 @@ MipsInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  return makeArrayRef(Flags);
 }

-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
 MipsInstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
  DIExpression *Expr =
      DIExpression::get(MI.getMF()->getFunction().getContext(), {});
@@ -962,8 +962,8 @@ MipsInstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
  return TargetInstrInfo::describeLoadedValue(MI, Reg);
 }

-Optional<RegImmPair> MipsInstrInfo::isAddImmediate(const MachineInstr &MI,
-                                                   Register Reg) const {
+std::optional<RegImmPair> MipsInstrInfo::isAddImmediate(const MachineInstr &MI,
+                                                        Register Reg) const {
  // TODO: Handle cases where Reg is a super- or sub-register of the
  // destination register.
  const MachineOperand &Op0 = MI.getOperand(0);
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.h b/llvm/lib/Target/Mips/MipsInstrInfo.h
index 8b98ad3dceea..d12e54e1202a 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -185,11 +185,11 @@ public:
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

-  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
-                                      Register Reg) const override;
+  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+                                           Register Reg) const override;

-  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
-                                                 Register Reg) const override;
+  std::optional<ParamLoadedValue>
+  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

 protected:
  bool isZeroImm(const MachineOperand &op) const;
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
index 4c93a51358ce..f752ab2d2549 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -221,7 +221,7 @@ static bool isReadOrWriteToDSPReg(const MachineInstr &MI, bool &isWrite) {
 /// We check for the common case of 'or', as it's MIPS' preferred instruction
 /// for GPRs but we have to check the operands to ensure that is the case.
 /// Other move instructions for MIPS are directly identifiable.
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
 MipsSEInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  bool isDSPControlWrite = false;
  // Condition is made to match the creation of WRDSP/RDDSP copy instruction
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
index 44a6dac2ccbd..6d5a958b33f9 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -81,7 +81,7 @@ protected:
  /// If the specific machine instruction is a instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
- Optional + std::optional isCopyInstrImpl(const MachineInstr &MI) const override; private: diff --git a/llvm/lib/Target/Mips/MipsTargetStreamer.h b/llvm/lib/Target/Mips/MipsTargetStreamer.h index 2f4b6eb37aa1..c9fafa24f069 100644 --- a/llvm/lib/Target/Mips/MipsTargetStreamer.h +++ b/llvm/lib/Target/Mips/MipsTargetStreamer.h @@ -183,7 +183,7 @@ public: } protected: - llvm::Optional ABI; + std::optional ABI; MipsABIFlagsSection ABIFlagsSection; bool GPRInfoSet; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index fb71c739b085..5b6e2cd901aa 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -808,10 +808,12 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) { // Helper function template to reduce amount of boilerplate code for // opcode selection. -static Optional pickOpcodeForVT( - MVT::SimpleValueType VT, unsigned Opcode_i8, unsigned Opcode_i16, - unsigned Opcode_i32, Optional Opcode_i64, unsigned Opcode_f16, - unsigned Opcode_f16x2, unsigned Opcode_f32, Optional Opcode_f64) { +static std::optional +pickOpcodeForVT(MVT::SimpleValueType VT, unsigned Opcode_i8, + unsigned Opcode_i16, unsigned Opcode_i32, + std::optional Opcode_i64, unsigned Opcode_f16, + unsigned Opcode_f16x2, unsigned Opcode_f32, + std::optional Opcode_f64) { switch (VT) { case MVT::i1: case MVT::i8: @@ -924,7 +926,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) { SDValue N1 = N->getOperand(1); SDValue Addr; SDValue Offset, Base; - Optional Opcode; + std::optional Opcode; MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy; if (SelectDirectAddr(N1, Addr)) { @@ -1003,7 +1005,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) { SDValue Chain = N->getOperand(0); SDValue Op1 = N->getOperand(1); SDValue Addr, Offset, Base; - Optional Opcode; + std::optional Opcode; SDLoc DL(N); SDNode *LD; MemSDNode *MemSD = cast(N); @@ -1266,7 +1268,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { Mem = cast(N); } - Optional Opcode; + std::optional Opcode; SDLoc DL(N); SDNode *LD; SDValue Base, Offset, Addr; @@ -1770,7 +1772,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) { SDValue BasePtr = ST->getBasePtr(); SDValue Addr; SDValue Offset, Base; - Optional Opcode; + std::optional Opcode; MVT::SimpleValueType SourceVT = Value.getNode()->getSimpleValueType(0).SimpleTy; @@ -1873,7 +1875,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) { SDValue Chain = N->getOperand(0); SDValue Op1 = N->getOperand(1); SDValue Addr, Offset, Base; - Optional Opcode; + std::optional Opcode; SDLoc DL(N); SDNode *ST; EVT EltVT = Op1.getValueType(); @@ -2113,7 +2115,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) { EVT EltVT = Node->getValueType(0); EVT MemVT = Mem->getMemoryVT(); - Optional Opcode; + std::optional Opcode; switch (VecSize) { default: @@ -2198,7 +2200,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) { // Determine target opcode // If we have an i1, use an 8-bit store. The lowering code in // NVPTXISelLowering will have already emitted an upcast. - Optional Opcode = 0; + std::optional Opcode = 0; switch (NumElts) { default: return false; @@ -2275,7 +2277,7 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) { // Determine target opcode // If we have an i1, use an 8-bit store. The lowering code in // NVPTXISelLowering will have already emitted an upcast. 
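pickOpcodeForVT above threads std::optional<unsigned> through both its parameters and its return value, so opcodes that simply do not exist for a given type (the i64/f64 variants) propagate as "no selection" instead of a magic constant. A toy version of the same shape, names invented:

#include <optional>

enum SimpleVT { i32, i64, f32, f64 };

// 64-bit opcodes may be absent, so they arrive and leave as std::optional.
static std::optional<unsigned>
pickOpcode(SimpleVT VT, unsigned Op32, std::optional<unsigned> Op64) {
  switch (VT) {
  case i32:
  case f32:
    return Op32;
  case i64:
  case f64:
    return Op64; // possibly std::nullopt; the caller then bails out
  }
  return std::nullopt;
}

int main() {
  std::optional<unsigned> Opcode = pickOpcode(i64, /*Op32=*/10, std::nullopt);
  return Opcode ? 1 : 0; // disengaged: no 64-bit form, selection fails cleanly
}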
- Optional Opcode = 0; + std::optional Opcode = 0; switch (N->getOpcode()) { default: switch (NumElts) { diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 47c1eae590b2..fc6bd946c252 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -1790,7 +1790,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } SmallVector ProxyRegOps; - SmallVector, 16> ProxyRegTruncates; + SmallVector, 16> ProxyRegTruncates; // Generate loads from param memory/moves from registers for result if (Ins.size() > 0) { @@ -1873,9 +1873,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, ProxyRegOps.push_back(RetVal.getValue(j)); if (needTruncate) - ProxyRegTruncates.push_back(Optional(Ins[VecIdx + j].VT)); + ProxyRegTruncates.push_back(std::optional(Ins[VecIdx + j].VT)); else - ProxyRegTruncates.push_back(Optional()); + ProxyRegTruncates.push_back(std::optional()); } Chain = RetVal.getValue(NumElts); diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp index 1f533c105b0e..741ec22d29cc 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp @@ -92,7 +92,8 @@ void PPCELFStreamer::emitInstruction(const MCInst &Inst, // instruction pair, return a value, otherwise return None. A true returned // value means the instruction is the PLDpc and a false value means it is // the user instruction. - Optional IsPartOfGOTToPCRelPair = isPartOfGOTToPCRelPair(Inst, STI); + std::optional IsPartOfGOTToPCRelPair = + isPartOfGOTToPCRelPair(Inst, STI); // User of the GOT-indirect address. // For example, the load that will get the relocation as follows: @@ -196,8 +197,8 @@ void PPCELFStreamer::emitGOTToPCRelLabel(const MCInst &Inst) { // at the opcode and in the case of PLDpc we will return true. For the load // (or store) this function will return false indicating it has found the second // instruction in the pair. -Optional llvm::isPartOfGOTToPCRelPair(const MCInst &Inst, - const MCSubtargetInfo &STI) { +std::optional llvm::isPartOfGOTToPCRelPair(const MCInst &Inst, + const MCSubtargetInfo &STI) { // Need at least two operands. if (Inst.getNumOperands() < 2) return std::nullopt; diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h index b3e12413eacf..7d786ac13bb9 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h @@ -48,8 +48,8 @@ private: // Check if the instruction Inst is part of a pair of instructions that make up // a link time GOT PC Rel optimization.
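The comments around isPartOfGOTToPCRelPair describe a genuinely three-valued answer, which std::optional<bool> models directly: disengaged for "not part of a pair", true for the producer (the PLDpc), false for the consumer. The same encoding in a self-contained sketch; the opcode values are made up:

#include <cstdio>
#include <optional>

// nullopt = unrelated instruction, true = producer, false = consumer.
static std::optional<bool> classify(unsigned Opcode) {
  if (Opcode == 1)
    return true;
  if (Opcode == 2)
    return false;
  return std::nullopt;
}

int main() {
  for (unsigned Op : {1u, 2u, 3u}) {
    std::optional<bool> R = classify(Op);
    if (!R)
      std::puts("not part of a pair");
    else
      std::puts(*R ? "producer" : "consumer");
  }
}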
-Optional isPartOfGOTToPCRelPair(const MCInst &Inst, - const MCSubtargetInfo &STI); +std::optional isPartOfGOTToPCRelPair(const MCInst &Inst, + const MCSubtargetInfo &STI); MCELFStreamer *createPPCELFStreamer(MCContext &Context, std::unique_ptr MAB, diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp index 975dc9a4d4ec..73ef8b11c229 100644 --- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp +++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp @@ -198,8 +198,8 @@ class PPCFastISel final : public FastISel { } // end anonymous namespace -static Optional getComparePred(CmpInst::Predicate Pred) { - switch (Pred) { +static std::optional getComparePred(CmpInst::Predicate Pred) { + switch (Pred) { // These are not representable with any single compare. case CmpInst::FCMP_FALSE: case CmpInst::FCMP_TRUE: @@ -770,7 +770,8 @@ bool PPCFastISel::SelectBranch(const Instruction *I) { // For now, just try the simplest case where it's fed by a compare. if (const CmpInst *CI = dyn_cast(BI->getCondition())) { if (isValueAvailable(CI)) { - Optional OptPPCPred = getComparePred(CI->getPredicate()); + std::optional OptPPCPred = + getComparePred(CI->getPredicate()); if (!OptPPCPred) return false; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp index 152a43849c2f..45d9bd2adfe2 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -5555,9 +5555,9 @@ public: return MI == EndLoop; } - Optional - createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, - SmallVectorImpl &Cond) override { + std::optional createTripCountGreaterCondition( + int TC, MachineBasicBlock &MBB, + SmallVectorImpl &Cond) override { if (TripCount == -1) { // Since BDZ/BDZ8 that we will insert will also decrease the ctr by 1, // so we don't need to generate any thing here. diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 72a7914ef37d..7bf4d4ab9d6b 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -8533,7 +8533,7 @@ struct NodeExtensionHelper { /// SExt. If \p SExt is None, this returns the source of this operand. /// \see ::getSource(). SDValue getOrCreateExtendedOp(const SDNode *Root, SelectionDAG &DAG, - Optional SExt) const { + std::optional SExt) const { if (!SExt.has_value()) return OrigOperand; @@ -8623,7 +8623,7 @@ struct NodeExtensionHelper { } } - using CombineToTry = std::function( + using CombineToTry = std::function( SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/, const NodeExtensionHelper & /*RHS*/)>; @@ -8798,8 +8798,8 @@ struct CombineResult { unsigned TargetOpcode; // No value means no extension is needed. If extension is needed, the value // indicates if it needs to be sign extended. - Optional SExtLHS; - Optional SExtRHS; + std::optional SExtLHS; + std::optional SExtRHS; /// Root of the combine. SDNode *Root; /// LHS of the TargetOpcode. 
@@ -8808,8 +8808,8 @@ struct CombineResult { NodeExtensionHelper RHS; CombineResult(unsigned TargetOpcode, SDNode *Root, - const NodeExtensionHelper &LHS, Optional SExtLHS, - const NodeExtensionHelper &RHS, Optional SExtRHS) + const NodeExtensionHelper &LHS, std::optional SExtLHS, + const NodeExtensionHelper &RHS, std::optional SExtRHS) : TargetOpcode(TargetOpcode), SExtLHS(SExtLHS), SExtRHS(SExtRHS), Root(Root), LHS(LHS), RHS(RHS) {} @@ -8837,7 +8837,7 @@ struct CombineResult { /// /// \returns None if the pattern doesn't match or a CombineResult that can be /// used to apply the pattern. -static Optional +static std::optional canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS, const NodeExtensionHelper &RHS, bool AllowSExt, bool AllowZExt) { @@ -8863,7 +8863,7 @@ canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS, /// /// \returns None if the pattern doesn't match or a CombineResult that can be /// used to apply the pattern. -static Optional +static std::optional canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS, const NodeExtensionHelper &RHS) { return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true, @@ -8874,9 +8874,9 @@ canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS, /// /// \returns None if the pattern doesn't match or a CombineResult that can be /// used to apply the pattern. -static Optional canFoldToVW_W(SDNode *Root, - const NodeExtensionHelper &LHS, - const NodeExtensionHelper &RHS) { +static std::optional +canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS, + const NodeExtensionHelper &RHS) { if (!RHS.areVLAndMaskCompatible(Root)) return std::nullopt; @@ -8899,7 +8899,7 @@ static Optional canFoldToVW_W(SDNode *Root, /// /// \returns None if the pattern doesn't match or a CombineResult that can be /// used to apply the pattern. -static Optional +static std::optional canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS, const NodeExtensionHelper &RHS) { return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true, @@ -8910,7 +8910,7 @@ canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS, /// /// \returns None if the pattern doesn't match or a CombineResult that can be /// used to apply the pattern. -static Optional +static std::optional canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS, const NodeExtensionHelper &RHS) { return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/false, @@ -8921,9 +8921,9 @@ canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS, /// /// \returns None if the pattern doesn't match or a CombineResult that can be /// used to apply the pattern. 
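All of these canFoldToVW* helpers share one signature and report a failed match as std::nullopt, which is what lets the combiner iterate a list of CombineToTry strategies and keep the first engaged result (the loop appears further down in this hunk). A minimal sketch of that driver shape, with a hypothetical Result type:

#include <functional>
#include <optional>
#include <vector>

struct Result { int Profit; };

int main() {
  using Strategy = std::function<std::optional<Result>(int)>;
  std::vector<Strategy> Strategies = {
      [](int N) -> std::optional<Result> {
        if (N % 2)
          return Result{1};
        return std::nullopt; // pattern did not match
      },
      [](int N) -> std::optional<Result> {
        if (N > 10)
          return Result{2};
        return std::nullopt;
      },
  };
  // Try each pattern in turn; the first engaged result wins.
  int Matched = 0;
  for (const Strategy &S : Strategies)
    if (std::optional<Result> R = S(12)) {
      Matched = R->Profit;
      break;
    }
  return Matched == 2 ? 0 : 1;
}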
-static Optional canFoldToVW_SU(SDNode *Root, - const NodeExtensionHelper &LHS, - const NodeExtensionHelper &RHS) { +static std::optional +canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS, + const NodeExtensionHelper &RHS) { if (!LHS.SupportsSExt || !RHS.SupportsZExt) return std::nullopt; if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root)) @@ -9020,7 +9020,7 @@ combineBinOp_VLToVWBinOp_VL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { for (NodeExtensionHelper::CombineToTry FoldingStrategy : FoldingStrategies) { - Optional Res = FoldingStrategy(N, LHS, RHS); + std::optional Res = FoldingStrategy(N, LHS, RHS); if (Res) { Matched = true; CombinesToApply.push_back(*Res); @@ -11224,7 +11224,7 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, } static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, - Optional FirstMaskArgument, + std::optional FirstMaskArgument, CCState &State, const RISCVTargetLowering &TLI) { const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); if (RC == &RISCV::VRRegClass) { @@ -11249,7 +11249,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, - Optional FirstMaskArgument) { + std::optional FirstMaskArgument) { unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); assert(XLen == 32 || XLen == 64); MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; @@ -11471,7 +11471,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, } template -static Optional preAssignMask(const ArgTy &Args) { +static std::optional preAssignMask(const ArgTy &Args) { for (const auto &ArgIdx : enumerate(Args)) { MVT ArgVT = ArgIdx.value().VT; if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1) @@ -11487,7 +11487,7 @@ void RISCVTargetLowering::analyzeInputArgs( unsigned NumArgs = Ins.size(); FunctionType *FType = MF.getFunction().getFunctionType(); - Optional FirstMaskArgument; + std::optional FirstMaskArgument; if (Subtarget.hasVInstructions()) FirstMaskArgument = preAssignMask(Ins); @@ -11518,7 +11518,7 @@ void RISCVTargetLowering::analyzeOutputArgs( CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const { unsigned NumArgs = Outs.size(); - Optional FirstMaskArgument; + std::optional FirstMaskArgument; if (Subtarget.hasVInstructions()) FirstMaskArgument = preAssignMask(Outs); @@ -11703,7 +11703,7 @@ static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, - Optional FirstMaskArgument) { + std::optional FirstMaskArgument) { // X5 and X6 might be used for save-restore libcall. 
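preAssignMask above returns the index of the first i1 mask-vector argument, or std::nullopt when there is none, and the calling-convention helpers then carry that as std::optional<unsigned> FirstMaskArgument. The same scan in miniature, with a made-up argument descriptor:

#include <optional>
#include <vector>

struct ArgVT { bool IsMaskVector; };

static std::optional<unsigned> preAssignMask(const std::vector<ArgVT> &Args) {
  for (unsigned I = 0; I < Args.size(); ++I)
    if (Args[I].IsMaskVector)
      return I;        // first mask argument gets special register treatment
  return std::nullopt; // no mask argument present
}

int main() {
  std::vector<ArgVT> Args = {{false}, {true}, {true}};
  return preAssignMask(Args).value_or(~0u) == 1 ? 0 : 1;
}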
static const MCPhysReg GPRList[] = { @@ -12387,7 +12387,7 @@ bool RISCVTargetLowering::CanLowerReturn( SmallVector RVLocs; CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); - Optional FirstMaskArgument; + std::optional FirstMaskArgument; if (Subtarget.hasVInstructions()) FirstMaskArgument = preAssignMask(Outs); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 35c18b9afb6e..a1c79d9ebc06 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -612,7 +612,7 @@ private: ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, - Optional FirstMaskArgument); + std::optional FirstMaskArgument); void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo, const SmallVectorImpl &Ins, bool IsRet, diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index f03ed1413c86..8d2a7f1e00ff 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -87,7 +87,7 @@ static bool isScalarMoveInstr(const MachineInstr &MI) { /// Get the EEW for a load or store instruction. Return None if MI is not /// a load or store which ignores SEW. -static Optional getEEWForLoadStore(const MachineInstr &MI) { +static std::optional getEEWForLoadStore(const MachineInstr &MI) { switch (getRVVMCOpcode(MI.getOpcode())) { default: return std::nullopt; @@ -659,7 +659,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags, InstrInfo.setAVLReg(RISCV::NoRegister); } #ifndef NDEBUG - if (Optional EEW = getEEWForLoadStore(MI)) { + if (std::optional EEW = getEEWForLoadStore(MI)) { assert(SEW == EEW && "Initial SEW doesn't match expected EEW"); } #endif diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 582eabdb3cc9..39c63c751e6d 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -1102,7 +1102,7 @@ bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { return MI.isAsCheapAsAMove(); } -Optional +std::optional RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { if (MI.isMoveReg()) return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; @@ -2383,7 +2383,7 @@ bool RISCV::isRVVSpill(const MachineInstr &MI) { return true; } -Optional> +std::optional> RISCV::isRVVSpillForZvlsseg(unsigned Opcode) { switch (Opcode) { default: diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h index 90a9b5a9a621..fb8e8bf9883d 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -112,7 +112,7 @@ public: bool isAsCheapAsAMove(const MachineInstr &MI) const override; - Optional + std::optional isCopyInstrImpl(const MachineInstr &MI) const override; bool verifyInstruction(const MachineInstr &MI, @@ -222,7 +222,8 @@ bool isZEXT_B(const MachineInstr &MI); // expect to see a FrameIndex operand. 
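isRVVSpillForZvlsseg above returns std::optional<std::pair<unsigned, unsigned>>, and callers typically test and unpack in a single step. A sketch of that consumption pattern; the opcode-to-info table here is invented, not the RVV one:

#include <optional>
#include <utility>

// Invented mapping: spill opcode -> (log2 of EEW, number of register fields).
static std::optional<std::pair<unsigned, unsigned>> spillInfo(unsigned Opc) {
  switch (Opc) {
  case 100:
    return std::make_pair(3u, 2u);
  case 101:
    return std::make_pair(4u, 4u);
  default:
    return std::nullopt; // not a segment-spill pseudo
  }
}

int main() {
  if (auto Info = spillInfo(101)) {
    auto [Log2EEW, NF] = *Info; // unpack only after the engaged check
    return Log2EEW == 4 && NF == 4 ? 0 : 1;
  }
  return 1;
}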
bool isRVVSpill(const MachineInstr &MI); -Optional> isRVVSpillForZvlsseg(unsigned Opcode); +std::optional> +isRVVSpillForZvlsseg(unsigned Opcode); bool isFaultFirstLoad(const MachineInstr &MI); diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp index e9418fd023ac..94f403183e5a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -1721,12 +1721,12 @@ static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, /// Lowers a builtin function call using the provided \p DemangledCall skeleton /// and external instruction \p Set. namespace SPIRV { -Optional lowerBuiltin(const StringRef DemangledCall, - SPIRV::InstructionSet::InstructionSet Set, - MachineIRBuilder &MIRBuilder, - const Register OrigRet, const Type *OrigRetTy, - const SmallVectorImpl &Args, - SPIRVGlobalRegistry *GR) { +std::optional lowerBuiltin(const StringRef DemangledCall, + SPIRV::InstructionSet::InstructionSet Set, + MachineIRBuilder &MIRBuilder, + const Register OrigRet, const Type *OrigRetTy, + const SmallVectorImpl &Args, + SPIRVGlobalRegistry *GR) { LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n"); // SPIR-V type and return register. diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h index 2f622e744330..26ffe6965951 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h @@ -31,12 +31,12 @@ namespace SPIRV { /// Register(0) otherwise. /// \p OrigRetTy is the type of the \p OrigRet. /// \p Args are the arguments of the lowered builtin call. -Optional lowerBuiltin(const StringRef DemangledCall, - InstructionSet::InstructionSet Set, - MachineIRBuilder &MIRBuilder, - const Register OrigRet, const Type *OrigRetTy, - const SmallVectorImpl &Args, - SPIRVGlobalRegistry *GR); +std::optional lowerBuiltin(const StringRef DemangledCall, + InstructionSet::InstructionSet Set, + MachineIRBuilder &MIRBuilder, + const Register OrigRet, const Type *OrigRetTy, + const SmallVectorImpl &Args, + SPIRVGlobalRegistry *GR); /// Handles the translation of the provided special opaque/builtin type \p Type /// to SPIR-V type. Generates the corresponding machine instructions for the /// target type or gets the already existing OpType<...> register from the diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h index 2cbec9b543c6..359a1b628ad9 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h @@ -44,13 +44,13 @@ enum ModuleSectionType { struct Requirements { const bool IsSatisfiable; - const Optional Cap; + const std::optional Cap; const ExtensionList Exts; const unsigned MinVer; // 0 if no min version is required. const unsigned MaxVer; // 0 if no max version is required. Requirements(bool IsSatisfiable = false, - Optional Cap = {}, + std::optional Cap = {}, ExtensionList Exts = {}, unsigned MinVer = 0, unsigned MaxVer = 0) : IsSatisfiable(IsSatisfiable), Cap(Cap), Exts(Exts), MinVer(MinVer), diff --git a/llvm/lib/Target/VE/VECustomDAG.cpp b/llvm/lib/Target/VE/VECustomDAG.cpp index 6cf5536145ec..873ffea498de 100644 --- a/llvm/lib/Target/VE/VECustomDAG.cpp +++ b/llvm/lib/Target/VE/VECustomDAG.cpp @@ -59,7 +59,7 @@ bool isMaskArithmetic(SDValue Op) { } /// \returns the VVP_* SDNode opcode corresponding to \p OC.
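The Requirements constructor above defaults its std::optional<Capability> parameter to {} (disengaged), so "no capability required" needs no sentinel enumerator. The same pattern in a standalone struct with hypothetical names:

#include <optional>

enum class Capability { Shader, Kernel };

struct Requirements {
  const bool IsSatisfiable;
  const std::optional<Capability> Cap; // disengaged == no capability needed

  Requirements(bool IsSatisfiable = false, std::optional<Capability> Cap = {})
      : IsSatisfiable(IsSatisfiable), Cap(Cap) {}
};

int main() {
  Requirements None;                             // no capability attached
  Requirements Shader(true, Capability::Shader); // capability required
  return !None.Cap && Shader.Cap == Capability::Shader ? 0 : 1;
}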
-Optional getVVPOpcode(unsigned Opcode) { +std::optional getVVPOpcode(unsigned Opcode) { switch (Opcode) { case ISD::MLOAD: return VEISD::VVP_LOAD; @@ -163,7 +163,7 @@ bool isVVPReductionOp(unsigned Opcode) { } // Return the AVL operand position for this VVP or VEC Op. -Optional getAVLPos(unsigned Opc) { +std::optional getAVLPos(unsigned Opc) { // This is only available for VP SDNodes auto PosOpt = ISD::getVPExplicitVectorLengthIdx(Opc); if (PosOpt) @@ -188,7 +188,7 @@ Optional getAVLPos(unsigned Opc) { return std::nullopt; } -Optional getMaskPos(unsigned Opc) { +std::optional getMaskPos(unsigned Opc) { // This is only available for VP SDNodes auto PosOpt = ISD::getVPMaskIdx(Opc); if (PosOpt) @@ -240,7 +240,7 @@ SDValue getMemoryPtr(SDValue Op) { return SDValue(); } -Optional getIdiomaticVectorType(SDNode *Op) { +std::optional getIdiomaticVectorType(SDNode *Op) { unsigned OC = Op->getOpcode(); // For memory ops -> the transferred data type diff --git a/llvm/lib/Target/VE/VECustomDAG.h b/llvm/lib/Target/VE/VECustomDAG.h index f99bffac37eb..2ba2128cc7cd 100644 --- a/llvm/lib/Target/VE/VECustomDAG.h +++ b/llvm/lib/Target/VE/VECustomDAG.h @@ -21,7 +21,7 @@ namespace llvm { -Optional getVVPOpcode(unsigned Opcode); +std::optional getVVPOpcode(unsigned Opcode); bool isVVPUnaryOp(unsigned Opcode); bool isVVPBinaryOp(unsigned Opcode); @@ -71,7 +71,7 @@ bool maySafelyIgnoreMask(SDValue Op); // /// AVL Functions { // The AVL operand position of this node. -Optional getAVLPos(unsigned); +std::optional getAVLPos(unsigned); // Whether this is a LEGALAVL node. bool isLegalAVL(SDValue AVL); @@ -80,7 +80,7 @@ bool isLegalAVL(SDValue AVL); SDValue getNodeAVL(SDValue); // Mask position of this node. -Optional getMaskPos(unsigned); +std::optional getMaskPos(unsigned); SDValue getNodeMask(SDValue); @@ -92,7 +92,7 @@ std::pair getAnnotatedNodeAVL(SDValue); /// Node Properties { -Optional getIdiomaticVectorType(SDNode *Op); +std::optional getIdiomaticVectorType(SDNode *Op); SDValue getLoadStoreStride(SDValue Op, VECustomDAG &CDAG); @@ -154,7 +154,7 @@ public: /// getNode { SDValue getNode(unsigned OC, SDVTList VTL, ArrayRef OpV, - Optional Flags = std::nullopt) const { + std::optional Flags = std::nullopt) const { auto N = DAG.getNode(OC, DL, VTL, OpV); if (Flags) N->setFlags(*Flags); @@ -162,7 +162,7 @@ public: } SDValue getNode(unsigned OC, ArrayRef ResVT, ArrayRef OpV, - Optional Flags = std::nullopt) const { + std::optional Flags = std::nullopt) const { auto N = DAG.getNode(OC, DL, ResVT, OpV); if (Flags) N->setFlags(*Flags); @@ -170,7 +170,7 @@ public: } SDValue getNode(unsigned OC, EVT ResVT, ArrayRef OpV, - Optional Flags = std::nullopt) const { + std::optional Flags = std::nullopt) const { auto N = DAG.getNode(OC, DL, ResVT, OpV); if (Flags) N->setFlags(*Flags); diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp index 9c76c54ed253..6c74e4429f6f 100644 --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -835,7 +835,8 @@ public: auto ElemTypeName = expectIdent(); if (ElemTypeName.empty()) return true; - Optional ElemType = WebAssembly::parseType(ElemTypeName); + std::optional ElemType = + WebAssembly::parseType(ElemTypeName); if (!ElemType) return error("Unknown type in .tabletype directive: ", ElemTypeTok); diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp index d63de9545719..6788ab35b225 100644 --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp @@ -83,7 +83,7 @@ bool WebAssemblyAsmTypeCheck::typeError(SMLoc ErrorLoc, const Twine &Msg) { } bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc, - Optional EVT) { + std::optional EVT) { if (Stack.empty()) { return typeError(ErrorLoc, EVT ? StringRef("empty stack while popping ") + diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h index 3be966b5739c..9c190e6beae7 100644 --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h @@ -38,7 +38,7 @@ class WebAssemblyAsmTypeCheck final { void dumpTypeStack(Twine Msg); bool typeError(SMLoc ErrorLoc, const Twine &Msg); - bool popType(SMLoc ErrorLoc, Optional EVT); + bool popType(SMLoc ErrorLoc, std::optional EVT); bool popRefType(SMLoc ErrorLoc); bool getLocal(SMLoc ErrorLoc, const MCInst &Inst, wasm::ValType &Type); bool checkEnd(SMLoc ErrorLoc, bool PopVals = false); diff --git a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp index 579101ffd46c..998905402b39 100644 --- a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp +++ b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp @@ -21,7 +21,7 @@ using namespace llvm; -Optional WebAssembly::parseType(StringRef Type) { +std::optional WebAssembly::parseType(StringRef Type) { // FIXME: can't use StringSwitch because wasm::ValType doesn't have an // "invalid" value. if (Type == "i32") diff --git a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h index 86211700c70a..c2540f6d159e 100644 --- a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h +++ b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h @@ -89,7 +89,7 @@ inline bool isRefType(wasm::ValType Type) { // Convert StringRef to ValType / HeapType / BlockType -Optional parseType(StringRef Type); +std::optional parseType(StringRef Type); BlockType parseBlockType(StringRef Type); MVT parseMVT(StringRef Type); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp index 40a4b7dbd862..e60f1397b993 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp @@ -50,7 +50,7 @@ using namespace llvm; // SelectionDAGISel::runOnMachineFunction. We have to do it in two places // because we want to do it while building the selection DAG for uses of alloca, // but not all alloca instructions are used so we have to follow up afterwards.
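parseType above returns std::optional<wasm::ValType> precisely because, as the FIXME notes, the enum has no "invalid" member that could serve as a StringSwitch default. A reduced version of the idiom with a toy enum:

#include <optional>
#include <string_view>

enum class ValType { I32, I64, F32, F64 };

static std::optional<ValType> parseType(std::string_view S) {
  if (S == "i32")
    return ValType::I32;
  if (S == "i64")
    return ValType::I64;
  if (S == "f32")
    return ValType::F32;
  if (S == "f64")
    return ValType::F64;
  return std::nullopt; // unknown spelling; no sentinel enumerator required
}

int main() {
  return parseType("i64").has_value() && !parseType("bogus") ? 0 : 1;
}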
-Optional +std::optional WebAssemblyFrameLowering::getLocalForStackObject(MachineFunction &MF, int FrameIndex) { MachineFrameInfo &MFI = MF.getFrameInfo(); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h index d30a3fa52d73..13a05b0e4561 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h @@ -56,8 +56,8 @@ public: // Returns the index of the WebAssembly local to which the stack object // FrameIndex in MF should be allocated, or None. - static Optional getLocalForStackObject(MachineFunction &MF, - int FrameIndex); + static std::optional getLocalForStackObject(MachineFunction &MF, + int FrameIndex); static unsigned getSPReg(const MachineFunction &MF); static unsigned getFPReg(const MachineFunction &MF); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index 2751be219474..a2c7ee19d56f 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -1449,7 +1449,8 @@ static bool IsWebAssemblyGlobal(SDValue Op) { return false; } -static Optional IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) { +static std::optional IsWebAssemblyLocal(SDValue Op, + SelectionDAG &DAG) { const FrameIndexSDNode *FI = dyn_cast(Op); if (!FI) return std::nullopt; @@ -1477,7 +1478,7 @@ SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op, SN->getMemoryVT(), SN->getMemOperand()); } - if (Optional Local = IsWebAssemblyLocal(Base, DAG)) { + if (std::optional Local = IsWebAssemblyLocal(Base, DAG)) { if (!Offset->isUndef()) report_fatal_error("unexpected offset when storing to webassembly local", false); @@ -1514,7 +1515,7 @@ SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op, LN->getMemoryVT(), LN->getMemOperand()); } - if (Optional Local = IsWebAssemblyLocal(Base, DAG)) { + if (std::optional Local = IsWebAssemblyLocal(Base, DAG)) { if (!Offset->isUndef()) report_fatal_error( "unexpected offset when loading from webassembly local", false); diff --git a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp index e7202df81c74..2bcdca51031e 100644 --- a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp +++ b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp @@ -212,7 +212,7 @@ void X86DynAllocaExpander::lower(MachineInstr *MI, Lowering L) { bool Is64BitAlloca = MI->getOpcode() == X86::DYN_ALLOCA_64; assert(SlotSize == 4 || SlotSize == 8); - Optional InstrNum; + std::optional InstrNum; if (unsigned Num = MI->peekDebugInstrNum()) { // Operand 2 of DYN_ALLOCAs contains the stack def. 
InstrNum = {Num, 2}; diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index 2f1f21f8231d..dd853dd08632 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -580,7 +580,7 @@ void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero, void X86FrameLowering::emitStackProbe( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, - Optional InstrNum) const { + std::optional InstrNum) const { const X86Subtarget &STI = MF.getSubtarget(); if (STI.isTargetWindowsCoreCLR()) { if (InProlog) { @@ -1106,7 +1106,7 @@ void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64( void X86FrameLowering::emitStackProbeCall( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, - Optional InstrNum) const { + std::optional InstrNum) const { bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large; // FIXME: Add indirect thunk support and remove this. diff --git a/llvm/lib/Target/X86/X86FrameLowering.h b/llvm/lib/Target/X86/X86FrameLowering.h index bc209fa9e913..3b76f2950eb9 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.h +++ b/llvm/lib/Target/X86/X86FrameLowering.h @@ -57,7 +57,7 @@ public: void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, - Optional + std::optional InstrNum = std::nullopt) const; bool stackProbeFunctionModifiesSP() const override; @@ -208,7 +208,7 @@ private: void emitStackProbeCall( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, - Optional InstrNum) const; + std::optional InstrNum) const; /// Emit target stack probe as an inline sequence. void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB, diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index 6249276adfa4..e4d381509b21 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -3499,17 +3499,20 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) { // If we have BMI2's BZHI, we are ok with multi-use patterns. // Else, if we only have BMI1's BEXTR, we require one-use.
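That comment is exactly what the checkUses lambdas in the next hunk encode: with BMI2, extra uses are tolerated by default, and individual call sites can override the default through a std::optional<bool> argument resolved with value_or. The idiom in isolation, flag names hypothetical:

#include <optional>

int main() {
  const bool AllowExtraUsesByDefault = true; // e.g. the subtarget has BMI2

  auto checkUses = [AllowExtraUsesByDefault](
                       unsigned NUses, unsigned ActualUses,
                       std::optional<bool> AllowExtraUses = std::nullopt) {
    // An explicit true/false wins; otherwise fall back to the default.
    return AllowExtraUses.value_or(AllowExtraUsesByDefault) ||
           ActualUses == NUses;
  };

  bool A = checkUses(1, 3);        // default tolerates extra uses -> true
  bool B = checkUses(1, 3, false); // per-call override forbids them -> false
  return A && !B ? 0 : 1;
}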
const bool AllowExtraUsesByDefault = Subtarget->hasBMI2(); - auto checkUses = [AllowExtraUsesByDefault](SDValue Op, unsigned NUses, - Optional AllowExtraUses) { + auto checkUses = [AllowExtraUsesByDefault]( + SDValue Op, unsigned NUses, + std::optional AllowExtraUses) { return AllowExtraUses.value_or(AllowExtraUsesByDefault) || Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo()); }; auto checkOneUse = [checkUses](SDValue Op, - Optional AllowExtraUses = std::nullopt) { + std::optional AllowExtraUses = + std::nullopt) { return checkUses(Op, 1, AllowExtraUses); }; auto checkTwoUse = [checkUses](SDValue Op, - Optional AllowExtraUses = std::nullopt) { + std::optional AllowExtraUses = + std::nullopt) { return checkUses(Op, 2, AllowExtraUses); }; diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 6c932a58ac15..eb2cf174dc99 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -3573,7 +3573,7 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, report_fatal_error("Cannot emit physreg copy instruction"); } -Optional +std::optional X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { if (MI.isMoveReg()) return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; @@ -3728,7 +3728,7 @@ static unsigned getLoadStoreRegOpcode(Register Reg, } } -Optional +std::optional X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const { const MCInstrDesc &Desc = MemI.getDesc(); @@ -3757,7 +3757,7 @@ X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI, bool X86InstrInfo::verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const { - Optional AMOrNone = getAddrModeFromMemoryOp(MI, nullptr); + std::optional AMOrNone = getAddrModeFromMemoryOp(MI, nullptr); if (!AMOrNone) return true; @@ -9041,7 +9041,7 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { /// If \p DescribedReg overlaps with the MOVrr instruction's destination /// register then, if possible, describe the value in terms of the source /// register. 
-static Optional +static std::optional describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, const TargetRegisterInfo *TRI) { Register DestReg = MI.getOperand(0).getReg(); @@ -9074,7 +9074,7 @@ describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr); } -Optional +std::optional X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const { const MachineOperand *Op = nullptr; DIExpression *Expr = nullptr; diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h index 3a185cdd63a2..bf86e5db9bd6 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.h +++ b/llvm/lib/Target/X86/X86InstrInfo.h @@ -327,7 +327,7 @@ public: SmallVectorImpl &Cond, bool AllowModify) const override; - Optional + std::optional getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override; @@ -574,8 +574,8 @@ public: return MI.getDesc().TSFlags & X86II::LOCK; } - Optional describeLoadedValue(const MachineInstr &MI, - Register Reg) const override; + std::optional + describeLoadedValue(const MachineInstr &MI, Register Reg) const override; protected: /// Commutes the operands in the given instruction by changing the operands @@ -596,7 +596,7 @@ protected: /// If the specific machine instruction is a instruction that moves/copies /// value from one register to another register return destination and source /// registers as machine operands. - Optional + std::optional isCopyInstrImpl(const MachineInstr &MI) const override; private: diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp index 73cc632c970b..7afbaeeec227 100644 --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -65,8 +65,8 @@ class X86MCInstLower { public: X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter); - Optional LowerMachineOperand(const MachineInstr *MI, - const MachineOperand &MO) const; + std::optional LowerMachineOperand(const MachineInstr *MI, + const MachineOperand &MO) const; void Lower(const MachineInstr *MI, MCInst &OutMI) const; MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const; @@ -428,7 +428,7 @@ static unsigned getRetOpcode(const X86Subtarget &Subtarget) { return Subtarget.is64Bit() ? X86::RET64 : X86::RET32; } -Optional +std::optional X86MCInstLower::LowerMachineOperand(const MachineInstr *MI, const MachineOperand &MO) const { switch (MO.getType()) { diff --git a/llvm/lib/Target/X86/X86MachineFunctionInfo.h b/llvm/lib/Target/X86/X86MachineFunctionInfo.h index 99cc9f525b2c..9b09f0c53f62 100644 --- a/llvm/lib/Target/X86/X86MachineFunctionInfo.h +++ b/llvm/lib/Target/X86/X86MachineFunctionInfo.h @@ -117,7 +117,7 @@ class X86MachineFunctionInfo : public MachineFunctionInfo { /// determine if we should insert tilerelease in frame lowering. bool HasVirtualTileReg = false; - Optional SwiftAsyncContextFrameIdx; + std::optional SwiftAsyncContextFrameIdx; // Preallocated fields are only used during isel. // FIXME: Can we find somewhere else to store these? 
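SwiftAsyncContextFrameIdx above becomes std::optional<int>, so "no frame slot allocated yet" is the disengaged state instead of a reserved index value. A standalone sketch of an optional member with its accessors, class name hypothetical:

#include <optional>

class FunctionInfo {
  std::optional<int> SwiftAsyncContextFrameIdx;

public:
  std::optional<int> getSwiftAsyncContextFrameIdx() const {
    return SwiftAsyncContextFrameIdx;
  }
  void setSwiftAsyncContextFrameIdx(int V) { SwiftAsyncContextFrameIdx = V; }
};

int main() {
  FunctionInfo FI;
  if (FI.getSwiftAsyncContextFrameIdx())
    return 1; // starts disengaged: nothing allocated yet
  FI.setSwiftAsyncContextFrameIdx(-3); // any int, even -3, is a valid index
  return FI.getSwiftAsyncContextFrameIdx() == -3 ? 0 : 1;
}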
@@ -222,7 +222,7 @@ public: bool hasVirtualTileReg() const { return HasVirtualTileReg; } void setHasVirtualTileReg(bool v) { HasVirtualTileReg = v; } - Optional getSwiftAsyncContextFrameIdx() const { + std::optional getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; } void setSwiftAsyncContextFrameIdx(int v) { SwiftAsyncContextFrameIdx = v; } diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp index 621da60aac92..b10572dac174 100644 --- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp +++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp @@ -1782,7 +1782,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst( // See if we can sink hardening the loaded value. auto SinkCheckToSingleUse = - [&](MachineInstr &MI) -> Optional { + [&](MachineInstr &MI) -> std::optional { Register DefReg = MI.getOperand(0).getReg(); // We need to find a single use which we can sink the check. We can @@ -1854,7 +1854,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst( }; MachineInstr *MI = &InitialMI; - while (Optional SingleUse = SinkCheckToSingleUse(*MI)) { + while (std::optional SingleUse = SinkCheckToSingleUse(*MI)) { // Update which MI we're checking now. MI = *SingleUse; if (!MI)