[mlir] Use std::nullopt instead of None (NFC)
This patch mechanically replaces None with std::nullopt where the compiler would warn if None were deprecated. The intent is to reduce the amount of manual work required in migrating from Optional to std::optional.

This is part of an effort to migrate from llvm::Optional to std::optional: https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
parent 1d650d4f1d
commit 1a36588ec6
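Every hunk below follows the same mechanical pattern: an llvm::None (or unqualified None) used as an Optional value, a default argument, or an empty-range sentinel becomes std::nullopt. A minimal sketch of the before/after shape (illustrative names only, not code from this patch):

#include <optional>
#include <string>

// Returning "no value": llvm::None becomes std::nullopt. Once the migration
// finishes, the return type itself becomes std::optional<T> rather than
// llvm::Optional<T>, which is an alias for it during the transition.
std::optional<unsigned> parseBitwidth(const std::string &spelling) {
  if (spelling.empty() || spelling[0] != 'i')
    return std::nullopt; // was: return llvm::None;
  return static_cast<unsigned>(std::stoul(spelling.substr(1)));
}

// Optional default arguments migrate one-for-one.
void printVerbose(std::optional<unsigned> verbosity = std::nullopt);

The ArrayRef and range-type default arguments below (for example ArrayRef<Location> locs = llvm::None) migrate the same way, since ArrayRef and the MLIR range wrappers also accept std::nullopt as an empty-range sentinel; no call sites need to change.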
@@ -30,7 +30,7 @@ public:
   static IntegerValueRange getMaxRange(Value value);

   /// Create an integer value range lattice value.
-  IntegerValueRange(Optional<ConstantIntRanges> value = None)
+  IntegerValueRange(Optional<ConstantIntRanges> value = std::nullopt)
       : value(std::move(value)) {}

   /// Whether the range is uninitialized. This happens when the state hasn't

@@ -143,7 +143,7 @@ public:
   /// Finalize the most recently started operation definition.
   void finalizeOperationDefinition(
       Operation *op, SMRange nameLoc, SMLoc endLoc,
-      ArrayRef<std::pair<unsigned, SMLoc>> resultGroups = llvm::None);
+      ArrayRef<std::pair<unsigned, SMLoc>> resultGroups = std::nullopt);

   /// Start a definition for a region nested under the current operation.
   void startRegionDefinition();

@@ -44,7 +44,7 @@ static llvm::ArrayRef<CppTy> unwrapList(size_t size, CTy *first,
                 "incompatible C and C++ types");

   if (size == 0)
-    return llvm::None;
+    return std::nullopt;

   assert(storage.empty() && "expected to populate storage");
   storage.reserve(size);

@@ -21,7 +21,7 @@ class OperationPass;
 /// If log1pBenefit is present, use it instead of benefit for the Log1p op.
 void populateMathToLibmConversionPatterns(
     RewritePatternSet &patterns, PatternBenefit benefit,
-    llvm::Optional<PatternBenefit> log1pBenefit = llvm::None);
+    llvm::Optional<PatternBenefit> log1pBenefit = std::nullopt);

 /// Create a pass to convert Math operations to libm calls.
 std::unique_ptr<OperationPass<ModuleOp>> createConvertMathToLibmPass();

@@ -143,7 +143,7 @@ struct DependenceComponent {
   Optional<int64_t> lb;
   // The upper bound of the dependence distance (inclusive).
   Optional<int64_t> ub;
-  DependenceComponent() : lb(llvm::None), ub(llvm::None) {}
+  DependenceComponent() : lb(std::nullopt), ub(std::nullopt) {}
 };

 /// Checks whether two accesses to the same memref access the same element.

@@ -55,7 +55,7 @@ public:
     assert(valArgs.empty() || valArgs.size() == getNumDimAndSymbolVars());
     values.reserve(numReservedCols);
     if (valArgs.empty())
-      values.resize(getNumDimAndSymbolVars(), None);
+      values.resize(getNumDimAndSymbolVars(), std::nullopt);
     else
       values.append(valArgs.begin(), valArgs.end());
   }

@@ -76,7 +76,7 @@ public:
       : IntegerPolyhedron(fac) {
     assert(valArgs.empty() || valArgs.size() == getNumDimAndSymbolVars());
     if (valArgs.empty())
-      values.resize(getNumDimAndSymbolVars(), None);
+      values.resize(getNumDimAndSymbolVars(), std::nullopt);
     else
       values.append(valArgs.begin(), valArgs.end());
   }

@@ -72,7 +72,7 @@ private:
                   SmallVectorImpl<Value> &newValues) -> Optional<LogicalResult> {
      if (T derivedType = type.dyn_cast<T>())
        return callback(builder, loc, derivedType, value, newValues);
-     return llvm::None;
+     return std::nullopt;
    };
  }

@@ -80,7 +80,7 @@ public:
   static Optional<AffineMap> getIndexingMap(OpView opView) {
     auto owner = dyn_cast<LinalgOp>(getOwner(opView));
     if (!owner)
-      return llvm::None;
+      return std::nullopt;
     if (OpOperand *operand = opView.dyn_cast<OpOperand *>())
       return owner.getMatchingIndexingMap(operand);
     return owner.getMatchingIndexingMap(owner.getDpsInitOperand(
@@ -91,14 +91,14 @@ public:
   static Optional<unsigned> getOperandNumber(OpView opView) {
     if (OpOperand *operand = opView.dyn_cast<OpOperand *>())
       return operand->getOperandNumber();
-    return llvm::None;
+    return std::nullopt;
   }
   // Return the result number if the `opView` is an OpResult. Otherwise return
   // llvm::None.
   static Optional<unsigned> getResultNumber(OpView opView) {
     if (OpResult result = opView.dyn_cast<Value>().cast<OpResult>())
       return result.getResultNumber();
-    return llvm::None;
+    return std::nullopt;
   }

   // Return the owner of the dependent OpView.

@@ -248,7 +248,7 @@ using CopyCallbackFn =

 struct LinalgPromotionOptions {
   /// Indices of subViews to promote. If `None`, try to promote all operands.
-  Optional<DenseSet<unsigned>> operandsToPromote = None;
+  Optional<DenseSet<unsigned>> operandsToPromote = std::nullopt;
   LinalgPromotionOptions &setOperandsToPromote(ArrayRef<int64_t> operands) {
     operandsToPromote = DenseSet<unsigned>();
     operandsToPromote->insert(operands.begin(), operands.end());

@@ -259,7 +259,7 @@ struct LinalgPromotionOptions {
   /// Otherwise the partial view will be used. The decision is defaulted to
   /// `useFullTileBuffersDefault` when `useFullTileBuffers` is None and for
   /// operands missing from `useFullTileBuffers`.
-  Optional<llvm::SmallBitVector> useFullTileBuffers = None;
+  Optional<llvm::SmallBitVector> useFullTileBuffers = std::nullopt;
   LinalgPromotionOptions &setUseFullTileBuffers(ArrayRef<bool> useFullTiles) {
     unsigned size = useFullTiles.size();
     llvm::SmallBitVector tmp(size, false);

@@ -276,7 +276,7 @@ struct LinalgPromotionOptions {
     return *this;
   }
   /// Alignment of promoted buffer. If `None` do not specify alignment.
-  Optional<unsigned> alignment = None;
+  Optional<unsigned> alignment = std::nullopt;
   LinalgPromotionOptions &setAlignment(unsigned align) {
     alignment = align;
     return *this;

@@ -290,8 +290,8 @@ struct LinalgPromotionOptions {
   /// Callback function to do the allocation of the promoted buffer. If None,
   /// then the default allocation scheme of allocating a memref<?xi8> buffer
   /// followed by a view operation is used.
-  Optional<AllocBufferCallbackFn> allocationFn = None;
-  Optional<DeallocBufferCallbackFn> deallocationFn = None;
+  Optional<AllocBufferCallbackFn> allocationFn = std::nullopt;
+  Optional<DeallocBufferCallbackFn> deallocationFn = std::nullopt;
   LinalgPromotionOptions &
   setAllocationDeallocationFns(AllocBufferCallbackFn const &allocFn,
                                DeallocBufferCallbackFn const &deallocFn) {

@@ -301,8 +301,8 @@ struct LinalgPromotionOptions {
   }
   /// Callback function to do the copy of data to and from the promoted
   /// subview. If None then a memref.copy is used.
-  Optional<CopyCallbackFn> copyInFn = None;
-  Optional<CopyCallbackFn> copyOutFn = None;
+  Optional<CopyCallbackFn> copyInFn = std::nullopt;
+  Optional<CopyCallbackFn> copyOutFn = std::nullopt;
   LinalgPromotionOptions &setCopyInOutFns(CopyCallbackFn const &copyIn,
                                           CopyCallbackFn const &copyOut) {
     copyInFn = copyIn;

@@ -598,7 +598,7 @@ struct LinalgTilingAndFusionOptions {
   SmallVector<int64_t> tileInterchange;
   /// When specified, specifies distribution of generated tile loops to
   /// processors.
-  Optional<LinalgLoopDistributionOptions> tileDistribution = None;
+  Optional<LinalgLoopDistributionOptions> tileDistribution = std::nullopt;
   LinalgTilingAndFusionOptions &
   setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
     tileDistribution = std::move(distributionOptions);

@@ -651,7 +651,7 @@ struct LinalgTilingOptions {

   /// When specified, specifies distribution of generated tile loops to
   /// processors.
-  Optional<LinalgLoopDistributionOptions> distribution = None;
+  Optional<LinalgLoopDistributionOptions> distribution = std::nullopt;

   LinalgTilingOptions &
   setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {

@@ -56,7 +56,7 @@ public:
   /// the given `storage` class. This method does not guarantee the uniqueness
   /// of extensions; the same extension may be appended multiple times.
   void getExtensions(ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);

   /// The capability requirements for each type are following the
   /// ((Capability::A OR Extension::B) AND (Capability::C OR Capability::D))

@@ -68,7 +68,7 @@ public:
   /// uniqueness of capabilities; the same capability may be appended multiple
   /// times.
   void getCapabilities(CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);

   /// Returns the size in bytes for each type. If no size can be calculated,
   /// returns `llvm::None`. Note that if the type has explicit layout, it is
@@ -89,9 +89,9 @@ public:
   static bool isValid(IntegerType);

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);

   Optional<int64_t> getSizeInBytes();
 };

@@ -117,9 +117,9 @@ public:
   bool hasCompileTimeKnownNumElements() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);

   Optional<int64_t> getSizeInBytes();
 };

@@ -145,9 +145,9 @@ public:
   unsigned getArrayStride() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);

   /// Returns the array size in bytes. Since array type may have an explicit
   /// stride declaration (in bytes), we also include it in the calculation.

@@ -188,9 +188,9 @@ public:
   // TODO: Add support for Access qualifier

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 // SPIR-V pointer type

@@ -206,9 +206,9 @@ public:
   StorageClass getStorageClass() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 // SPIR-V run-time array type

@@ -230,9 +230,9 @@ public:
   unsigned getArrayStride() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 // SPIR-V sampled image type

@@ -253,9 +253,9 @@ public:
   Type getImageType() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<spirv::StorageClass> storage = llvm::None);
+                     Optional<spirv::StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<spirv::StorageClass> storage = llvm::None);
+                       Optional<spirv::StorageClass> storage = std::nullopt);
 };

 /// SPIR-V struct type. Two kinds of struct types are supported:

@@ -389,9 +389,9 @@ public:
                ArrayRef<MemberDecorationInfo> memberDecorations = {});

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 llvm::hash_code

@@ -416,9 +416,9 @@ public:
   unsigned getColumns() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 // SPIR-V joint matrix type

@@ -443,9 +443,9 @@ public:
   MatrixLayout getMatrixLayout() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 // SPIR-V matrix type

@@ -480,9 +480,9 @@ public:
   Type getElementType() const;

   void getExtensions(SPIRVType::ExtensionArrayRefVector &extensions,
-                     Optional<StorageClass> storage = llvm::None);
+                     Optional<StorageClass> storage = std::nullopt);
   void getCapabilities(SPIRVType::CapabilityArrayRefVector &capabilities,
-                       Optional<StorageClass> storage = llvm::None);
+                       Optional<StorageClass> storage = std::nullopt);
 };

 } // namespace spirv

@@ -174,9 +174,9 @@ public:
         dimTypes(numTensors,
                  std::vector<DimLevelType>(numLoops, DimLevelType::Undef)),
         loopIdxToDim(numTensors,
-                     std::vector<Optional<unsigned>>(numLoops, llvm::None)),
+                     std::vector<Optional<unsigned>>(numLoops, std::nullopt)),
         dimToLoopIdx(numTensors,
-                     std::vector<Optional<unsigned>>(numLoops, llvm::None)) {}
+                     std::vector<Optional<unsigned>>(numLoops, std::nullopt)) {}

   /// Adds a tensor expression. Returns its index.
   unsigned addExp(Kind k, unsigned e0, unsigned e1 = -1u, Value v = Value(),
@@ -68,7 +68,7 @@ Optional<SmallVector<Value>> checkHasDynamicBatchDims(PatternRewriter &rewriter,
     if (llvm::any_of(dynTy.getShape().drop_front(), ShapedType::isDynamic)) {
       (void)rewriter.notifyMatchFailure(
           op, "input can only be dynamic for batch size");
-      return llvm::None;
+      return std::nullopt;
     }
   }

@@ -149,7 +149,7 @@ public:

   /// Attaches a note to the last diagnostic.
   /// Expects this object to be a silenceable failure.
-  Diagnostic &attachNote(Optional<Location> loc = llvm::None) {
+  Diagnostic &attachNote(Optional<Location> loc = std::nullopt) {
     assert(isSilenceableFailure() &&
            "can only attach notes to silenceable failures");
     return diagnostics.back().attachNote(loc);

@@ -212,7 +212,7 @@ public:
   }

   /// Attaches a note to the error.
-  Diagnostic &attachNote(Optional<Location> loc = llvm::None) {
+  Diagnostic &attachNote(Optional<Location> loc = std::nullopt) {
     return diag.attachNote(loc);
   }

@@ -360,14 +360,14 @@ private:
     if (srcSubShape == resultSubShape)
       composedReassociation.push_back(srcIndices);
     else
-      return llvm::None;
+      return std::nullopt;
   }

   // Find reassociation to collapse `srcSubShape` into `resultSubShape`.
   auto subShapeReassociation =
       getReassociationIndicesForCollapse(srcSubShape, resultSubShape);
   if (!subShapeReassociation)
-    return llvm::None;
+    return std::nullopt;

   // Remap the subshape indices back to the original srcShape.
   for (auto &subshape_indices : *subShapeReassociation) {

@@ -118,7 +118,7 @@ void populateBubbleVectorBitCastOpPatterns(RewritePatternSet &patterns,
 /// VectorToSCF, which reduces the rank of vector transfer ops.
 void populateVectorTransferLoweringPatterns(
     RewritePatternSet &patterns,
-    llvm::Optional<unsigned> maxTransferRank = llvm::None,
+    llvm::Optional<unsigned> maxTransferRank = std::nullopt,
     PatternBenefit benefit = 1);

 /// These patterns materialize masks for various vector ops such as transfers.

@@ -67,7 +67,7 @@ struct ExecutionEngineOptions {

   /// `jitCodeGenOptLevel`, when provided, is used as the optimization level for
   /// target code generation.
-  Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None;
+  Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = std::nullopt;

   /// If `sharedLibPaths` are provided, the underlying JIT-compilation will
   /// open and link the shared libraries for symbol resolution.

@@ -123,7 +123,7 @@ public:
   /// Invokes the function with the given name passing it the list of opaque
   /// pointers to the actual arguments.
   llvm::Error invokePacked(StringRef name,
-                           MutableArrayRef<void *> args = llvm::None);
+                           MutableArrayRef<void *> args = std::nullopt);

   /// Trait that defines how a given type is passed to the JIT code. This
   /// defaults to passing the address but can be specialized.

@@ -106,7 +106,7 @@ class BlockRange final
                       Block *, Block *, Block *> {
 public:
   using RangeBaseT::RangeBaseT;
-  BlockRange(ArrayRef<Block *> blocks = llvm::None);
+  BlockRange(ArrayRef<Block *> blocks = std::nullopt);
   BlockRange(SuccessorRange successors);
   template <typename Arg, typename = std::enable_if_t<std::is_constructible<
                               ArrayRef<Block *>, Arg>::value>>

@@ -408,15 +408,15 @@ public:
   /// 'parent'. `locs` contains the locations of the inserted arguments, and
   /// should match the size of `argTypes`.
   Block *createBlock(Region *parent, Region::iterator insertPt = {},
-                     TypeRange argTypes = llvm::None,
-                     ArrayRef<Location> locs = llvm::None);
+                     TypeRange argTypes = std::nullopt,
+                     ArrayRef<Location> locs = std::nullopt);

   /// Add new block with 'argTypes' arguments and set the insertion point to the
   /// end of it. The block is placed before 'insertBefore'. `locs` contains the
   /// locations of the inserted arguments, and should match the size of
   /// `argTypes`.
-  Block *createBlock(Block *insertBefore, TypeRange argTypes = llvm::None,
-                     ArrayRef<Location> locs = llvm::None);
+  Block *createBlock(Block *insertBefore, TypeRange argTypes = std::nullopt,
+                     ArrayRef<Location> locs = std::nullopt);

   //===--------------------------------------------------------------------===//
   // Operation Creation
@@ -312,7 +312,7 @@ auto ElementsAttr::try_value_begin() const
   FailureOr<detail::ElementsAttrIndexer> indexer =
       getValuesImpl(TypeID::get<T>());
   if (failed(indexer))
-    return llvm::None;
+    return std::nullopt;
   return iterator<T>(std::move(*indexer), 0);
 }
 } // namespace mlir.

@@ -244,7 +244,7 @@ public:
   /// Attaches a note to this diagnostic. A new location may be optionally
   /// provided, if not, then the location defaults to the one specified for this
   /// diagnostic. Notes may not be attached to other notes.
-  Diagnostic &attachNote(Optional<Location> noteLoc = llvm::None);
+  Diagnostic &attachNote(Optional<Location> noteLoc = std::nullopt);

   using note_iterator = llvm::pointee_iterator<NoteVector::iterator>;
   using const_note_iterator =

@@ -342,7 +342,7 @@ public:
   }

   /// Attaches a note to this diagnostic.
-  Diagnostic &attachNote(Optional<Location> noteLoc = llvm::None) {
+  Diagnostic &attachNote(Optional<Location> noteLoc = std::nullopt) {
     assert(isActive() && "diagnostic not active");
     return impl->attachNote(noteLoc);
   }

@@ -118,7 +118,7 @@ struct FieldParser<
        return {Optional<IntT>(value)};
      return failure();
    }
-    return {llvm::None};
+    return {std::nullopt};
  }
};

@@ -62,13 +62,13 @@ void setAllResultAttrDicts(Operation *op, ArrayRef<Attribute> attrs);
 /// Return all of the attributes for the argument at 'index'.
 inline ArrayRef<NamedAttribute> getArgAttrs(Operation *op, unsigned index) {
   auto argDict = getArgAttrDict(op, index);
-  return argDict ? argDict.getValue() : llvm::None;
+  return argDict ? argDict.getValue() : std::nullopt;
 }

 /// Return all of the attributes for the result at 'index'.
 inline ArrayRef<NamedAttribute> getResultAttrs(Operation *op, unsigned index) {
   auto resultDict = getResultAttrDict(op, index);
-  return resultDict ? resultDict.getValue() : llvm::None;
+  return resultDict ? resultDict.getValue() : std::nullopt;
 }

 /// Insert the specified arguments and update the function type attribute.

@@ -77,7 +77,7 @@ struct constant_op_binder {

     // Fold the constant to an attribute.
     SmallVector<OpFoldResult, 1> foldedOp;
-    LogicalResult result = op->fold(/*operands=*/llvm::None, foldedOp);
+    LogicalResult result = op->fold(/*operands=*/std::nullopt, foldedOp);
     (void)result;
     assert(succeeded(result) && "expected ConstantLike op to be foldable");

@@ -41,7 +41,7 @@ public:
   OptionalParseResult(ParseResult result) : impl(result) {}
   OptionalParseResult(const InFlightDiagnostic &)
       : OptionalParseResult(failure()) {}
-  OptionalParseResult(std::nullopt_t) : impl(llvm::None) {}
+  OptionalParseResult(std::nullopt_t) : impl(std::nullopt) {}

   /// Returns true if we contain a valid ParseResult value.
   bool has_value() const { return impl.has_value(); }

@@ -94,7 +94,7 @@ public:
   MLIRContext *getContext() { return getOperation()->getContext(); }

   /// Print the operation to the given stream.
-  void print(raw_ostream &os, OpPrintingFlags flags = llvm::None) {
+  void print(raw_ostream &os, OpPrintingFlags flags = std::nullopt) {
     state->print(os, flags);
   }
   void print(raw_ostream &os, AsmState &asmState) {

@@ -1334,12 +1334,12 @@ public:
   /// skip parsing that component.
   virtual ParseResult parseGenericOperationAfterOpName(
       OperationState &result,
-      Optional<ArrayRef<UnresolvedOperand>> parsedOperandType = llvm::None,
-      Optional<ArrayRef<Block *>> parsedSuccessors = llvm::None,
+      Optional<ArrayRef<UnresolvedOperand>> parsedOperandType = std::nullopt,
+      Optional<ArrayRef<Block *>> parsedSuccessors = std::nullopt,
       Optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions =
-          llvm::None,
-      Optional<ArrayRef<NamedAttribute>> parsedAttributes = llvm::None,
-      Optional<FunctionType> parsedFnType = llvm::None) = 0;
+          std::nullopt,
+      Optional<ArrayRef<NamedAttribute>> parsedAttributes = std::nullopt,
+      Optional<FunctionType> parsedFnType = std::nullopt) = 0;

   /// Parse a single SSA value operand name along with a result number if
   /// `allowResultNumber` is true.
@@ -240,7 +240,7 @@ public:
   /// take O(N) where N is the number of operations within the parent block.
   bool isBeforeInBlock(Operation *other);

-  void print(raw_ostream &os, const OpPrintingFlags &flags = llvm::None);
+  void print(raw_ostream &os, const OpPrintingFlags &flags = std::nullopt);
   void print(raw_ostream &os, AsmState &state);
   void dump();

@@ -88,7 +88,7 @@ protected:
   /// may not be populated.
   struct Impl {
     Impl(StringAttr name)
-        : name(name), dialect(nullptr), interfaceMap(llvm::None) {}
+        : name(name), dialect(nullptr), interfaceMap(std::nullopt) {}

     /// The name of the operation.
     StringAttr name;

@@ -92,7 +92,7 @@ public:
   Optional<OperationName> getRootKind() const {
     if (rootKind == RootKind::OperationName)
       return OperationName::getFromOpaquePointer(rootValue);
-    return llvm::None;
+    return std::nullopt;
   }

   /// Return the interface ID used to match the root operation of this pattern.

@@ -101,7 +101,7 @@ public:
   Optional<TypeID> getRootInterfaceID() const {
     if (rootKind == RootKind::InterfaceID)
       return TypeID::getFromOpaquePointer(rootValue);
-    return llvm::None;
+    return std::nullopt;
   }

   /// Return the trait ID used to match the root operation of this pattern.

@@ -110,7 +110,7 @@ public:
   Optional<TypeID> getRootTraitID() const {
     if (rootKind == RootKind::TraitID)
       return TypeID::getFromOpaquePointer(rootValue);
-    return llvm::None;
+    return std::nullopt;
   }

   /// Return the benefit (the inverse of "cost") of matching this pattern. The

@@ -465,12 +465,12 @@ public:
   /// 'argValues' is used to replace the block arguments of 'source' after
   /// merging.
   virtual void mergeBlocks(Block *source, Block *dest,
-                           ValueRange argValues = llvm::None);
+                           ValueRange argValues = std::nullopt);

   // Merge the operations of block 'source' before the operation 'op'. Source
   // block should not have existing predecessors or successors.
   void mergeBlockBefore(Block *source, Operation *op,
-                        ValueRange argValues = llvm::None);
+                        ValueRange argValues = std::nullopt);

   /// Split the operations starting at "before" (inclusive) out of the given
   /// block into a new block, and return it.

@@ -1587,7 +1587,8 @@ public:
   RewritePatternSet &add(ConstructorArg &&arg, ConstructorArgs &&...args) {
     // The following expands a call to emplace_back for each of the pattern
     // types 'Ts'.
-    (addImpl<Ts>(/*debugLabels=*/llvm::None, std::forward<ConstructorArg>(arg),
+    (addImpl<Ts>(/*debugLabels=*/std::nullopt,
+                 std::forward<ConstructorArg>(arg),
                  std::forward<ConstructorArgs>(args)...),
      ...);
     return *this;

@@ -1666,7 +1667,7 @@ public:
   RewritePatternSet &insert(ConstructorArg &&arg, ConstructorArgs &&...args) {
     // The following expands a call to emplace_back for each of the pattern
     // types 'Ts'.
-    (addImpl<Ts>(/*debugLabels=*/llvm::None, arg, args...), ...);
+    (addImpl<Ts>(/*debugLabels=*/std::nullopt, arg, args...), ...);
     return *this;
   }

@@ -338,7 +338,7 @@ class RegionRange
 public:
   using RangeBaseT::RangeBaseT;

-  RegionRange(MutableArrayRef<Region> regions = llvm::None);
+  RegionRange(MutableArrayRef<Region> regions = std::nullopt);

   template <typename Arg, typename = std::enable_if_t<std::is_constructible<
                               ArrayRef<std::unique_ptr<Region>>, Arg>::value>>

@@ -301,7 +301,7 @@ public:
   /// Return the users of the provided symbol operation.
   ArrayRef<Operation *> getUsers(Operation *symbol) const {
     auto it = symbolToUsers.find(symbol);
-    return it != symbolToUsers.end() ? it->second.getArrayRef() : llvm::None;
+    return it != symbolToUsers.end() ? it->second.getArrayRef() : std::nullopt;
   }

   /// Return true if the given symbol has no uses.

@@ -36,7 +36,7 @@ class TypeRange : public llvm::detail::indexed_accessor_range_base<
                       Type, Type, Type> {
 public:
   using RangeBaseT::RangeBaseT;
-  TypeRange(ArrayRef<Type> types = llvm::None);
+  TypeRange(ArrayRef<Type> types = std::nullopt);
   explicit TypeRange(OperandRange values);
   explicit TypeRange(ResultRange values);
   explicit TypeRange(ValueRange values);

@@ -122,13 +122,13 @@ public:
   /// and range length. `operandSegments` is an optional set of operand segments
   /// to be updated when mutating the operand list.
   MutableOperandRange(Operation *owner, unsigned start, unsigned length,
-                      ArrayRef<OperandSegment> operandSegments = llvm::None);
+                      ArrayRef<OperandSegment> operandSegments = std::nullopt);
   MutableOperandRange(Operation *owner);

   /// Slice this range into a sub range, with the additional operand segment.
   MutableOperandRange
   slice(unsigned subStart, unsigned subLen,
-        Optional<OperandSegment> segment = llvm::None) const;
+        Optional<OperandSegment> segment = std::nullopt) const;

   /// Append the given values to the range.
   void append(ValueRange values);
@@ -369,7 +369,7 @@ public:
       : ValueRange(ResultRange(values)) {}
   ValueRange(ArrayRef<BlockArgument> values)
       : ValueRange(ArrayRef<Value>(values.data(), values.size())) {}
-  ValueRange(ArrayRef<Value> values = llvm::None);
+  ValueRange(ArrayRef<Value> values = std::nullopt);
   ValueRange(OperandRange values);
   ValueRange(ResultRange values);

@@ -202,7 +202,7 @@ public:

   /// Returns the unknown invocation bounds, i.e., there is no information on
   /// how many times a region may be invoked.
-  static InvocationBounds getUnknown() { return {0, llvm::None}; }
+  static InvocationBounds getUnknown() { return {0, std::nullopt}; }

 private:
   /// The minimum number of times the successor region will be invoked.

@@ -171,7 +171,7 @@ public:
   Optional<std::reference_wrapper<AnalysisT>> getCachedAnalysis() const {
     auto res = analyses.find(TypeID::get<AnalysisT>());
     if (res == analyses.end())
-      return llvm::None;
+      return std::nullopt;
     return {static_cast<AnalysisModel<AnalysisT> &>(*res->second).analysis};
   }

@@ -309,7 +309,7 @@ public:
         return parentAM->analyses.getCachedAnalysis<AnalysisT>();
       curParent = parentAM;
     }
-    return None;
+    return std::nullopt;
   }

   /// Query for the given analysis for the current operation.

@@ -352,7 +352,7 @@ public:
     assert(op->getParentOp() == impl->getOperation());
     auto it = impl->childAnalyses.find(op);
     if (it == impl->childAnalyses.end())
-      return llvm::None;
+      return std::nullopt;
     return it->second->analyses.getCachedAnalysis<AnalysisT>();
   }

@@ -159,7 +159,7 @@ public:
   }

 protected:
-  explicit Pass(TypeID passID, Optional<StringRef> opName = llvm::None)
+  explicit Pass(TypeID passID, Optional<StringRef> opName = std::nullopt)
       : passID(passID), opName(opName) {}
   Pass(const Pass &other) : Pass(other.passID, other.opName) {}

@@ -122,7 +122,7 @@ private:
     for (auto &it : this->Values)
       if (it.V.compare(value))
        return it.Name;
-    return llvm::None;
+    return std::nullopt;
   }
 };

@@ -49,8 +49,8 @@ public:
   /// their type name.
   FrozenRewritePatternSet(
       RewritePatternSet &&patterns,
-      ArrayRef<std::string> disabledPatternLabels = llvm::None,
-      ArrayRef<std::string> enabledPatternLabels = llvm::None);
+      ArrayRef<std::string> disabledPatternLabels = std::nullopt,
+      ArrayRef<std::string> enabledPatternLabels = std::nullopt);

   /// Return the op specific native patterns held by this list.
   const OpSpecificNativePatternListT &getOpSpecificNativePatterns() const {

@@ -97,7 +97,7 @@ public:
   template <typename T>
   ArrayRef<T> copyInto(ArrayRef<T> elements) {
     if (elements.empty())
-      return llvm::None;
+      return std::nullopt;
     auto result = allocator.Allocate<T>(elements.size());
     std::uninitialized_copy(elements.begin(), elements.end(), result);
     return ArrayRef<T>(result, elements.size());

@@ -178,7 +178,7 @@ public:
   }
   template <typename Storage>
   void registerSingletonStorageType(TypeID id) {
-    registerSingletonStorageType<Storage>(id, llvm::None);
+    registerSingletonStorageType<Storage>(id, std::nullopt);
   }
   /// Utility override when the storage type represents the type id.
   template <typename Storage>

@@ -259,9 +259,9 @@ public:
   // Pair representing either a index to an argument or a type constraint. Only
   // one of these entries should have the non-default value.
   struct ArgOrType {
-    explicit ArgOrType(int index) : index(index), constraint(None) {}
+    explicit ArgOrType(int index) : index(index), constraint(std::nullopt) {}
     explicit ArgOrType(TypeConstraint constraint)
-        : index(None), constraint(constraint) {}
+        : index(std::nullopt), constraint(constraint) {}
     bool isArg() const {
       assert(constraint.has_value() ^ index.has_value());
       return index.has_value();

@@ -291,17 +291,17 @@ public:
     return SymbolInfo(op, Kind::Attr, DagAndConstant(nullptr, index));
   }
   static SymbolInfo getAttr() {
-    return SymbolInfo(nullptr, Kind::Attr, llvm::None);
+    return SymbolInfo(nullptr, Kind::Attr, std::nullopt);
   }
   static SymbolInfo getOperand(DagNode node, const Operator *op, int index) {
     return SymbolInfo(op, Kind::Operand,
                       DagAndConstant(node.getAsOpaquePointer(), index));
   }
   static SymbolInfo getResult(const Operator *op) {
-    return SymbolInfo(op, Kind::Result, llvm::None);
+    return SymbolInfo(op, Kind::Result, std::nullopt);
   }
   static SymbolInfo getValue() {
-    return SymbolInfo(nullptr, Kind::Value, llvm::None);
+    return SymbolInfo(nullptr, Kind::Value, std::nullopt);
   }
   static SymbolInfo getMultipleValues(int numValues) {
     return SymbolInfo(nullptr, Kind::MultipleValues,
@@ -44,7 +44,7 @@ public:

   /// Attach a note to this diagnostic.
   Diagnostic &attachNote(const Twine &msg,
-                         Optional<SMRange> noteLoc = llvm::None) {
+                         Optional<SMRange> noteLoc = std::nullopt) {
     assert(getSeverity() != Severity::DK_Note &&
            "cannot attach a Note to a Note");
     notes.emplace_back(

@@ -886,8 +886,8 @@ public:
                                 ArrayRef<VariableDecl *> results,
                                 const CompoundStmt *body,
                                 Type resultType) {
-    return createImpl(ctx, name, inputs, /*nativeInputTypes=*/llvm::None,
-                      results, /*codeBlock=*/llvm::None, body, resultType);
+    return createImpl(ctx, name, inputs, /*nativeInputTypes=*/std::nullopt,
+                      results, /*codeBlock=*/std::nullopt, body, resultType);
   }

   /// Return the name of the constraint.

@@ -1008,7 +1008,7 @@ public:
   /// Return the name of this operation, or none if the name is unknown.
   Optional<StringRef> getName() const {
     const Name *name = Decl::getName();
-    return name ? Optional<StringRef>(name->getName()) : llvm::None;
+    return name ? Optional<StringRef>(name->getName()) : std::nullopt;
   }

 private:

@@ -1093,7 +1093,7 @@ public:
                               ArrayRef<VariableDecl *> results,
                               const CompoundStmt *body,
                               Type resultType) {
-    return createImpl(ctx, name, inputs, results, /*codeBlock=*/llvm::None,
+    return createImpl(ctx, name, inputs, results, /*codeBlock=*/std::nullopt,
                       body, resultType);
   }

@@ -161,7 +161,7 @@ public:
   /// Return an instance of the Operation type with an optional operation name.
   /// If no name is provided, this type may refer to any operation.
   static OperationType get(Context &context,
-                           Optional<StringRef> name = llvm::None,
+                           Optional<StringRef> name = std::nullopt,
                            const ods::Operation *odsOp = nullptr);

   /// Return the name of this operation type, or None if it doesn't have on.

@@ -247,7 +247,7 @@ public:
   static TupleType get(Context &context, ArrayRef<Type> elementTypes,
                        ArrayRef<StringRef> elementNames);
   static TupleType get(Context &context,
-                       ArrayRef<Type> elementTypes = llvm::None);
+                       ArrayRef<Type> elementTypes = std::nullopt);

   /// Return the element types of this tuple.
   ArrayRef<Type> getElementTypes() const;

@@ -100,11 +100,11 @@ struct TranslateToMLIRRegistration {
   TranslateToMLIRRegistration(
       llvm::StringRef name, llvm::StringRef description,
       const TranslateSourceMgrToMLIRFunction &function,
-      Optional<llvm::Align> inputAlignment = llvm::None);
+      Optional<llvm::Align> inputAlignment = std::nullopt);
   TranslateToMLIRRegistration(
       llvm::StringRef name, llvm::StringRef description,
       const TranslateStringRefToMLIRFunction &function,
-      Optional<llvm::Align> inputAlignment = llvm::None);
+      Optional<llvm::Align> inputAlignment = std::nullopt);
 };

 struct TranslateFromMLIRRegistration {

@@ -284,7 +284,7 @@ private:
                    ArrayRef<Type> callStack) -> Optional<LogicalResult> {
       T derivedType = type.dyn_cast<T>();
       if (!derivedType)
-        return llvm::None;
+        return std::nullopt;
       return callback(derivedType, results, callStack);
     };
   }

@@ -306,7 +306,7 @@ private:
                    Location loc) -> Optional<Value> {
       if (T derivedType = resultType.dyn_cast<T>())
         return callback(builder, derivedType, inputs, loc);
-      return llvm::None;
+      return std::nullopt;
     };
   }

@@ -208,14 +208,14 @@ LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
                            Operation *inlinePoint, BlockAndValueMapping &mapper,
                            ValueRange resultsToReplace,
                            TypeRange regionResultTypes,
-                           Optional<Location> inlineLoc = llvm::None,
+                           Optional<Location> inlineLoc = std::nullopt,
                            bool shouldCloneInlinedRegion = true);
 LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
                            Block *inlineBlock, Block::iterator inlinePoint,
                            BlockAndValueMapping &mapper,
                            ValueRange resultsToReplace,
                            TypeRange regionResultTypes,
-                           Optional<Location> inlineLoc = llvm::None,
+                           Optional<Location> inlineLoc = std::nullopt,
                            bool shouldCloneInlinedRegion = true);

 /// This function is an overload of the above 'inlineRegion' that allows for

@@ -224,13 +224,13 @@ LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
 LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
                            Operation *inlinePoint, ValueRange inlinedOperands,
                            ValueRange resultsToReplace,
-                           Optional<Location> inlineLoc = llvm::None,
+                           Optional<Location> inlineLoc = std::nullopt,
                            bool shouldCloneInlinedRegion = true);
 LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
                            Block *inlineBlock, Block::iterator inlinePoint,
                            ValueRange inlinedOperands,
                            ValueRange resultsToReplace,
-                           Optional<Location> inlineLoc = llvm::None,
+                           Optional<Location> inlineLoc = std::nullopt,
                            bool shouldCloneInlinedRegion = true);

 /// This function inlines a given region, 'src', of a callable operation,

@@ -55,8 +55,8 @@ std::unique_ptr<Pass> createCanonicalizerPass();
 /// set to their type name.
 std::unique_ptr<Pass>
 createCanonicalizerPass(const GreedyRewriteConfig &config,
-                        ArrayRef<std::string> disabledPatterns = llvm::None,
-                        ArrayRef<std::string> enabledPatterns = llvm::None);
+                        ArrayRef<std::string> disabledPatterns = std::nullopt,
+                        ArrayRef<std::string> enabledPatterns = std::nullopt);

 /// Creates a pass to perform control-flow sinking.
 std::unique_ptr<Pass> createControlFlowSinkPass();

@@ -70,7 +70,7 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
       }
       return inputIndex - firstInputIndex;
     }
-    return llvm::None;
+    return std::nullopt;
   };

   // Check branches from the parent operation.

@@ -80,7 +80,7 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
     regionIndex = region->getRegionNumber();
   }
   if (Optional<unsigned> operandIndex =
-          getOperandIndexIfPred(/*predIndex=*/llvm::None)) {
+          getOperandIndexIfPred(/*predIndex=*/std::nullopt)) {
     collectUnderlyingAddressValues(
         branch.getSuccessorEntryOperands(regionIndex)[*operandIndex], maxDepth,
         visited, output);
@@ -95,7 +95,8 @@ void AbstractSparseDataFlowAnalysis::visitOperation(Operation *op) {
   // The results of a region branch operation are determined by control-flow.
   if (auto branch = dyn_cast<RegionBranchOpInterface>(op)) {
     return visitRegionSuccessors({branch}, branch,
-                                 /*successorIndex=*/llvm::None, resultLattices);
+                                 /*successorIndex=*/std::nullopt,
+                                 resultLattices);
   }

   // The results of a call operation are determined by the callgraph.

@@ -1405,7 +1405,7 @@ Optional<MPInt> IntegerRelation::getConstantBoundOnDimSize(
     // representation of the local vars.
     if (!std::all_of(eq.begin() + getNumDimAndSymbolVars(), eq.end() - 1,
                      [](const MPInt &coeff) { return coeff == 0; }))
-      return None;
+      return std::nullopt;

     // This variable can only take a single value.
     if (lb) {

@@ -1442,7 +1442,7 @@ Optional<MPInt> IntegerRelation::getConstantBoundOnDimSize(
   }
   if (r == e)
     // If it doesn't, there isn't a bound on it.
-    return None;
+    return std::nullopt;

   // Positions of constraints that are lower/upper bounds on the variable.
   SmallVector<unsigned, 4> lbIndices, ubIndices;

@@ -1477,7 +1477,7 @@ Optional<MPInt> IntegerRelation::getConstantBoundOnDimSize(
                   atIneq(lbPos, pos));
       // This bound is non-negative by definition.
       diff = std::max<MPInt>(diff, MPInt(0));
-      if (minDiff == None || diff < minDiff) {
+      if (minDiff == std::nullopt || diff < minDiff) {
        minDiff = diff;
        minLbPosition = lbPos;
        minUbPosition = ubPos;

@@ -1536,7 +1536,7 @@ IntegerRelation::computeConstantLowerOrUpperBound(unsigned pos) {
   }
   if (r == e)
     // If it doesn't, there isn't a bound on it.
-    return None;
+    return std::nullopt;

   Optional<MPInt> minOrMaxConst;

@@ -1563,10 +1563,10 @@ IntegerRelation::computeConstantLowerOrUpperBound(unsigned pos) {
         isLower ? ceilDiv(-atIneq(r, getNumCols() - 1), atIneq(r, 0))
                 : floorDiv(atIneq(r, getNumCols() - 1), -atIneq(r, 0));
     if (isLower) {
-      if (minOrMaxConst == None || boundConst > minOrMaxConst)
+      if (minOrMaxConst == std::nullopt || boundConst > minOrMaxConst)
         minOrMaxConst = boundConst;
     } else {
-      if (minOrMaxConst == None || boundConst < minOrMaxConst)
+      if (minOrMaxConst == std::nullopt || boundConst < minOrMaxConst)
         minOrMaxConst = boundConst;
     }
   }

@@ -1589,7 +1589,7 @@ Optional<MPInt> IntegerRelation::getConstantBound(BoundType type,
   Optional<MPInt> ub =
       IntegerRelation(*this)
           .computeConstantLowerOrUpperBound</*isLower=*/false>(pos);
-  return (lb && ub && *lb == *ub) ? Optional<MPInt>(*ub) : None;
+  return (lb && ub && *lb == *ub) ? Optional<MPInt>(*ub) : std::nullopt;
 }

 // A simple (naive and conservative) check for hyper-rectangularity.

@@ -430,5 +430,5 @@ PWMAFunction::valueAt(ArrayRef<MPInt> point) const {
   for (const Piece &piece : pieces)
     if (piece.domain.containsPoint(point))
       return piece.output.valueAt(point);
-  return None;
+  return std::nullopt;
 }

@@ -369,7 +369,7 @@ static PresburgerRelation getSetDifference(IntegerRelation b,
       unsigned simplexSnapshot = simplex.getSnapshot();
      IntegerRelation::CountsSnapshot bCounts = b.getCounts();
      frames.push_back(Frame{simplexSnapshot, bCounts, sI, ineqsToProcess,
-                             /*lastIneqProcessed=*/llvm::None});
+                             /*lastIneqProcessed=*/std::nullopt});
      // We have completed the initial setup for this level.
      // Fallthrough to the main recursive part below.
    }

@@ -381,7 +381,7 @@ SmallVector<Optional<MPInt>, 4>
 DivisionRepr::divValuesAt(ArrayRef<MPInt> point) const {
   assert(point.size() == getNumNonDivs() && "Incorrect point size");

-  SmallVector<Optional<MPInt>, 4> divValues(getNumDivs(), None);
+  SmallVector<Optional<MPInt>, 4> divValues(getNumDivs(), std::nullopt);
   bool changed = true;
   while (changed) {
     changed = false;

@@ -357,7 +357,7 @@ static Optional<APInt> buildAttributeAPInt(Type type, bool isNegative,
   APInt result;
   bool isHex = spelling.size() > 1 && spelling[1] == 'x';
   if (spelling.getAsInteger(isHex ? 0 : 10, result))
-    return llvm::None;
+    return std::nullopt;

   // Extend or truncate the bitwidth to the right size.
   unsigned width = type.isIndex() ? IndexType::kInternalStorageBitWidth

@@ -369,7 +369,7 @@ static Optional<APInt> buildAttributeAPInt(Type type, bool isNegative,
     // The parser can return an unnecessarily wide result with leading zeros.
     // This isn't a problem, but truncating off bits is bad.
     if (result.countLeadingZeros() < result.getBitWidth() - width)
-      return llvm::None;
+      return std::nullopt;

     result = result.trunc(width);
   }

@@ -378,18 +378,18 @@ static Optional<APInt> buildAttributeAPInt(Type type, bool isNegative,
     // 0 bit integers cannot be negative and manipulation of their sign bit will
     // assert, so short-cut validation here.
     if (isNegative)
-      return llvm::None;
+      return std::nullopt;
   } else if (isNegative) {
     // The value is negative, we have an overflow if the sign bit is not set
     // in the negated apInt.
     result.negate();
     if (!result.isSignBitSet())
-      return llvm::None;
+      return std::nullopt;
   } else if ((type.isSignedInteger() || type.isIndex()) &&
              result.isSignBitSet()) {
     // The value is a positive signed integer or index,
     // we have an overflow if the sign bit is set.
-    return llvm::None;
+    return std::nullopt;
   }

   return result;
@@ -1175,7 +1175,7 @@ Attribute Parser::parseStridedLayoutAttr() {
   SMLoc loc = getToken().getLoc();
   auto emitWrongTokenError = [&] {
     emitError(loc, "expected a 64-bit signed integer or '?'");
-    return llvm::None;
+    return std::nullopt;
   };

   bool negative = consumeIf(Token::minus);

@@ -250,7 +250,7 @@ OptionalParseResult Parser::parseOptionalInteger(APInt &result) {

   Token curToken = getToken();
   if (curToken.isNot(Token::integer, Token::minus))
-    return llvm::None;
+    return std::nullopt;

   bool negative = consumeIf(Token::minus);
   Token curTok = getToken();

@@ -533,12 +533,12 @@ public:
   /// skip parsing that component.
   ParseResult parseGenericOperationAfterOpName(
       OperationState &result,
-      Optional<ArrayRef<UnresolvedOperand>> parsedOperandUseInfo = llvm::None,
-      Optional<ArrayRef<Block *>> parsedSuccessors = llvm::None,
+      Optional<ArrayRef<UnresolvedOperand>> parsedOperandUseInfo = std::nullopt,
+      Optional<ArrayRef<Block *>> parsedSuccessors = std::nullopt,
       Optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions =
-          llvm::None,
-      Optional<ArrayRef<NamedAttribute>> parsedAttributes = llvm::None,
-      Optional<FunctionType> parsedFnType = llvm::None);
+          std::nullopt,
+      Optional<ArrayRef<NamedAttribute>> parsedAttributes = std::nullopt,
+      Optional<FunctionType> parsedFnType = std::nullopt);

   /// Parse an operation instance that is in the generic form and insert it at
   /// the provided insertion point.

@@ -1073,7 +1073,7 @@ Value OperationParser::createForwardRefPlaceholder(SMLoc loc, Type type) {
   auto name = OperationName("builtin.unrealized_conversion_cast", getContext());
   auto *op = Operation::create(
       getEncodedSourceLocation(loc), name, type, /*operands=*/{},
-      /*attributes=*/llvm::None, /*successors=*/{}, /*numRegions=*/0);
+      /*attributes=*/std::nullopt, /*successors=*/{}, /*numRegions=*/0);
   forwardRefPlaceholders[op->getResult(0)] = loc;
   return op->getResult(0);
 }

@@ -1524,7 +1524,7 @@ public:
                       bool allowResultNumber = true) override {
     if (parser.getToken().isOrIsCodeCompletionFor(Token::percent_identifier))
       return parseOperand(result, allowResultNumber);
-    return llvm::None;
+    return std::nullopt;
   }

   /// Parse zero or more SSA comma-separated operand references with a specified

@@ -1657,7 +1657,7 @@ public:
                       bool allowAttrs) override {
     if (parser.getToken().is(Token::percent_identifier))
       return parseArgument(result, allowType, allowAttrs);
-    return llvm::None;
+    return std::nullopt;
   }

   ParseResult parseArgumentList(SmallVectorImpl<Argument> &result,

@@ -1697,7 +1697,7 @@ public:
                       ArrayRef<Argument> arguments,
                       bool enableNameShadowing) override {
     if (parser.getToken().isNot(Token::l_brace))
-      return llvm::None;
+      return std::nullopt;
     return parseRegion(region, arguments, enableNameShadowing);
   }

@@ -1709,7 +1709,7 @@ public:
                       ArrayRef<Argument> arguments,
                       bool enableNameShadowing = false) override {
     if (parser.getToken().isNot(Token::l_brace))
-      return llvm::None;
+      return std::nullopt;
     std::unique_ptr<Region> newRegion = std::make_unique<Region>();
     if (parseRegion(*newRegion, arguments, enableNameShadowing))
       return failure();

@@ -1730,7 +1730,7 @@ public:
   /// Parse an optional operation successor and its operand list.
   OptionalParseResult parseOptionalSuccessor(Block *&dest) override {
     if (!parser.getToken().isOrIsCodeCompletionFor(Token::caret_identifier))
-      return llvm::None;
+      return std::nullopt;
     return parseSuccessor(dest);
   }

@@ -1759,7 +1759,7 @@ public:
                       SmallVectorImpl<Argument> &lhs,
                       SmallVectorImpl<UnresolvedOperand> &rhs) override {
     if (failed(parseOptionalLParen()))
-      return llvm::None;
+      return std::nullopt;

     auto parseElt = [&]() -> ParseResult {
       if (parseArgument(lhs.emplace_back()) || parseEqual() ||

@@ -2391,7 +2391,7 @@ public:
     // TODO: We could avoid an additional alloc+copy here if we pre-allocated
     // the buffer to use during hex processing.
     Optional<std::string> blobData =
-        value.is(Token::string) ? value.getHexStringValue() : llvm::None;
+        value.is(Token::string) ? value.getHexStringValue() : std::nullopt;
     if (!blobData)
       return p.emitError(value.getLoc(),
                          "expected hex string blob for key '" + key + "'");
@ -236,7 +236,7 @@ public:
|
|||
AttributeT &attr,
|
||||
Type type = {}) {
|
||||
if (getToken().isNot(kind))
|
||||
return llvm::None;
|
||||
return std::nullopt;
|
||||
|
||||
if (Attribute parsedAttr = parseAttribute(type)) {
|
||||
attr = parsedAttr.cast<AttributeT>();
|
||||
|
|
|
@ -30,7 +30,7 @@ Optional<unsigned> Token::getUnsignedIntegerValue() const {
|
|||
|
||||
unsigned result = 0;
|
||||
if (spelling.getAsInteger(isHex ? 0 : 10, result))
|
||||
return None;
|
||||
return std::nullopt;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,7 @@ Optional<uint64_t> Token::getUInt64IntegerValue(StringRef spelling) {
|
|||
|
||||
uint64_t result = 0;
|
||||
if (spelling.getAsInteger(isHex ? 0 : 10, result))
|
||||
return None;
|
||||
return std::nullopt;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,7 @@ Optional<uint64_t> Token::getUInt64IntegerValue(StringRef spelling) {
|
|||
Optional<double> Token::getFloatingPointValue() const {
|
||||
double result = 0;
|
||||
if (spelling.getAsDouble(result))
|
||||
return None;
|
||||
return std::nullopt;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -60,14 +60,14 @@ Optional<unsigned> Token::getIntTypeBitwidth() const {
|
|||
unsigned bitwidthStart = (spelling[0] == 'i' ? 1 : 2);
|
||||
unsigned result = 0;
|
||||
if (spelling.drop_front(bitwidthStart).getAsInteger(10, result))
|
||||
return None;
|
||||
return std::nullopt;
|
||||
return result;
|
||||
}
|
||||
|
||||
Optional<bool> Token::getIntTypeSignedness() const {
|
||||
assert(getKind() == inttype);
|
||||
if (spelling[0] == 'i')
|
||||
return llvm::None;
|
||||
return std::nullopt;
|
||||
if (spelling[0] == 's')
|
||||
return true;
|
||||
assert(spelling[0] == 'u');
|
||||
|
@ -138,7 +138,7 @@ Optional<std::string> Token::getHexStringValue() const {
|
|||
std::string hex;
|
||||
if (!bytes.consume_front("0x") || (bytes.size() & 1) ||
|
||||
!llvm::tryGetFromHex(bytes, hex))
|
||||
return llvm::None;
|
||||
return std::nullopt;
|
||||
return hex;
|
||||
}
|
||||
|
||||
|
@ -161,7 +161,7 @@ Optional<unsigned> Token::getHashIdentifierNumber() const {
|
|||
assert(getKind() == hash_identifier);
|
||||
unsigned result = 0;
|
||||
if (spelling.drop_front().getAsInteger(10, result))
|
||||
return None;
|
||||
return std::nullopt;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ OptionalParseResult Parser::parseOptionalType(Type &type) {
|
|||
return failure(!(type = parseType()));
|
||||
|
||||
default:
|
||||
return llvm::None;
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@ MlirPass mlirCreateExternalPass(MlirTypeID passID, MlirStringRef name,
|
|||
void *userData) {
|
||||
return wrap(static_cast<mlir::Pass *>(new mlir::ExternalPass(
|
||||
unwrap(passID), unwrap(name), unwrap(argument), unwrap(description),
|
||||
opName.length > 0 ? Optional<StringRef>(unwrap(opName)) : None,
|
||||
opName.length > 0 ? Optional<StringRef>(unwrap(opName)) : std::nullopt,
|
||||
{dependentDialects, static_cast<size_t>(nDependentDialects)}, callbacks,
|
||||
userData)));
|
||||
}
|
||||
|
|
|
@ -402,7 +402,7 @@ static Optional<StringRef> mfmaOpToIntrinsic(MFMAOp mfma, Chipset chipset) {
|
|||
if (m == 4 && n == 4 && k == 4 && b == 4)
|
||||
return ROCDL::mfma_f64_4x4x4f64::getOperationName();
|
||||
}
|
||||
return None;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
|
|
@@ -304,7 +304,7 @@ public:
     if (type.isa<CoroHandleType>())
       return AsyncAPI::opaquePointerType(type.getContext());

-    return llvm::None;
+    return std::nullopt;
   }
 };
 } // namespace

@@ -66,7 +66,7 @@ struct AssertOpLowering : public ConvertOpToLLVMPattern<cf::AssertOp> {
     // Generate IR to call `abort`.
     Block *failureBlock = rewriter.createBlock(opBlock->getParent());
-    rewriter.create<LLVM::CallOp>(loc, abortFunc, llvm::None);
+    rewriter.create<LLVM::CallOp>(loc, abortFunc, std::nullopt);
     rewriter.create<LLVM::UnreachableOp>(loc);

     // Generate assertion test.
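In the AssertOp hunk above, std::nullopt is passed where the call expects a range of operand values. That compiles because llvm::ArrayRef (and range types built on it, such as ValueRange) gained a constructor from std::nullopt_t producing an empty range, mirroring the old ArrayRef(NoneType) constructor; this is my understanding of the migration, hedged accordingly. A small sketch assuming only LLVM's ADT headers (sumAll is a hypothetical helper):

#include "llvm/ADT/ArrayRef.h"
#include <optional>

// Sums an arbitrary list of ints; an empty range sums to zero.
static int sumAll(llvm::ArrayRef<int> values) {
  int total = 0;
  for (int v : values)
    total += v;
  return total;
}

int main() {
  int zero = sumAll(std::nullopt); // empty ArrayRef<int>
  int six = sumAll({1, 2, 3});
  return (zero == 0 && six == 6) ? 0 : 1;
}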
@@ -183,7 +183,7 @@ struct LowerGpuOpsToNVVMOpsPass
   converter.addConversion([&](MemRefType type) -> Optional<Type> {
     if (type.getMemorySpaceAsInt() !=
         gpu::GPUDialect::getPrivateAddressSpace())
-      return llvm::None;
+      return std::nullopt;
     return converter.convertType(MemRefType::Builder(type).setMemorySpace(
         IntegerAttr::get(IntegerType::get(m.getContext(), 64), 0)));
   });

@@ -224,7 +224,7 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, TypeConverter &typeConverter,
   auto newFuncOp = rewriter.create<spirv::FuncOp>(
       funcOp.getLoc(), funcOp.getName(),
       rewriter.getFunctionType(signatureConverter.getConvertedTypes(),
-                               llvm::None));
+                               std::nullopt));
   for (const auto &namedAttr : funcOp->getAttrs()) {
     if (namedAttr.getName() == FunctionOpInterface::getTypeAttrName() ||
         namedAttr.getName() == SymbolTable::getSymbolAttrName())

@@ -329,7 +329,7 @@ LogicalResult GPUModuleConversion::matchAndRewrite(
   // Add a keyword to the module name to avoid symbolic conflict.
   std::string spvModuleName = (kSPIRVModule + moduleOp.getName()).str();
   auto spvModule = rewriter.create<spirv::ModuleOp>(
-      moduleOp.getLoc(), addressingModel, *memoryModel, llvm::None,
+      moduleOp.getLoc(), addressingModel, *memoryModel, std::nullopt,
       StringRef(spvModuleName));

   // Move the region from the module op into the SPIR-V module.

@@ -43,7 +43,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
   // order and those should take priority.
   addConversion([](Type type) {
     return LLVM::isCompatibleType(type) ? llvm::Optional<Type>(type)
-                                        : llvm::None;
+                                        : std::nullopt;
   });

   // LLVM container types may (recursively) contain other types that must be

@@ -53,7 +53,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
       return type;
     if (auto pointee = convertType(type.getElementType()))
       return LLVM::LLVMPointerType::get(pointee, type.getAddressSpace());
-    return llvm::None;
+    return std::nullopt;
   });
   addConversion([&](LLVM::LLVMStructType type, SmallVectorImpl<Type> &results,
                     ArrayRef<Type> callStack) -> llvm::Optional<LogicalResult> {

@@ -82,7 +82,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
       SmallVector<Type> convertedElemTypes;
       convertedElemTypes.reserve(type.getBody().size());
       if (failed(convertTypes(type.getBody(), convertedElemTypes)))
-        return llvm::None;
+        return std::nullopt;

       if (failed(convertedType.setBody(convertedElemTypes, type.isPacked())))
         return failure();

@@ -93,7 +93,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
     SmallVector<Type> convertedSubtypes;
     convertedSubtypes.reserve(type.getBody().size());
     if (failed(convertTypes(type.getBody(), convertedSubtypes)))
-      return llvm::None;
+      return std::nullopt;

     results.push_back(LLVM::LLVMStructType::getLiteral(
         type.getContext(), convertedSubtypes, type.isPacked()));

@@ -102,17 +102,17 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
   addConversion([&](LLVM::LLVMArrayType type) -> llvm::Optional<Type> {
     if (auto element = convertType(type.getElementType()))
       return LLVM::LLVMArrayType::get(element, type.getNumElements());
-    return llvm::None;
+    return std::nullopt;
   });
   addConversion([&](LLVM::LLVMFunctionType type) -> llvm::Optional<Type> {
     Type convertedResType = convertType(type.getReturnType());
     if (!convertedResType)
-      return llvm::None;
+      return std::nullopt;

     SmallVector<Type> convertedArgTypes;
     convertedArgTypes.reserve(type.getNumParams());
     if (failed(convertTypes(type.getParams(), convertedArgTypes)))
-      return llvm::None;
+      return std::nullopt;

     return LLVM::LLVMFunctionType::get(convertedResType, convertedArgTypes,
                                        type.isVarArg());

@@ -125,7 +125,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
       [&](OpBuilder &builder, UnrankedMemRefType resultType, ValueRange inputs,
           Location loc) -> Optional<Value> {
         if (inputs.size() == 1)
-          return llvm::None;
+          return std::nullopt;
         return UnrankedMemRefDescriptor::pack(builder, loc, *this, resultType,
                                               inputs);
       });

@@ -135,7 +135,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
         // TODO: bare ptr conversion could be handled here but we would need a way
         // to distinguish between FuncOp and other regions.
         if (inputs.size() == 1)
-          return llvm::None;
+          return std::nullopt;
         return MemRefDescriptor::pack(builder, loc, *this, resultType, inputs);
       });
   // Add generic source and target materializations to handle cases where

@@ -144,7 +144,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
                                ValueRange inputs,
                                Location loc) -> Optional<Value> {
     if (inputs.size() != 1)
-      return llvm::None;
+      return std::nullopt;

     return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
         .getResult(0);

@@ -153,7 +153,7 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
                                ValueRange inputs,
                                Location loc) -> Optional<Value> {
     if (inputs.size() != 1)
-      return llvm::None;
+      return std::nullopt;

     return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
         .getResult(0);
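In the addConversion callbacks above, the optional return value is a three-way signal rather than a simple success flag: std::nullopt means "this rule does not apply, try the next registered conversion", a null Type means "the rule applies but conversion failed", and a non-null Type is the converted result. A toy analogue of that contract, using std::optional directly and a string in place of an MLIR Type (convertName is hypothetical, not the TypeConverter API):

#include <optional>
#include <string>

// std::nullopt -> rule does not apply; a caller would try the next rule.
// ""           -> rule applies but conversion failed (stands in for a null Type).
// otherwise    -> the converted result.
std::optional<std::string> convertName(const std::string &name) {
  if (name.rfind("llvm.", 0) != 0)
    return std::nullopt;           // not our dialect: defer to other rules
  if (name == "llvm.token")
    return std::string();          // known but unconvertible: hard failure
  return "converted." + name;      // success
}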
@@ -451,7 +451,7 @@ private:
           .getValue()
           .getSExtValue();

-    return llvm::None;
+    return std::nullopt;
   }

   Value extractSizeOfRankedMemRef(Type operandType, memref::DimOp dimOp,

@@ -1278,7 +1278,7 @@ private:
     UnrankedMemRefDescriptor::computeSizes(rewriter, loc, *getTypeConverter(),
                                            targetDesc, sizes);
     Value underlyingDescPtr = rewriter.create<LLVM::AllocaOp>(
-        loc, getVoidPtrType(), sizes.front(), llvm::None);
+        loc, getVoidPtrType(), sizes.front(), std::nullopt);
     targetDesc.setMemRefDescPtr(rewriter, loc, underlyingDescPtr);

     // Extract pointers and offset from the source memref.

@@ -1362,8 +1362,8 @@ private:
     // Hook up the cond exit to the remainder.
     rewriter.setInsertionPointToEnd(condBlock);
-    rewriter.create<LLVM::CondBrOp>(loc, pred, bodyBlock, llvm::None, remainder,
-                                    llvm::None);
+    rewriter.create<LLVM::CondBrOp>(loc, pred, bodyBlock, std::nullopt,
+                                    remainder, std::nullopt);

     // Reset position to beginning of new remainder block.
     rewriter.setInsertionPointToStart(remainder);

@@ -1599,7 +1599,8 @@ static void fillInStridesForCollapsedMemDescriptor(
           initBlock->getParent(), Region::iterator(continueBlock), {});
     }
     rewriter.create<LLVM::CondBrOp>(loc, predNeOne, continueBlock,
-                                    srcStride, nextEntryBlock, llvm::None);
+                                    srcStride, nextEntryBlock,
+                                    std::nullopt);
     curEntryBlock = nextEntryBlock;
   }
 }

@@ -1897,7 +1898,7 @@ matchSimpleAtomicOp(memref::AtomicRMWOp atomicOp) {
   case arith::AtomicRMWKind::andi:
     return LLVM::AtomicBinOp::_and;
   default:
-    return llvm::None;
+    return std::nullopt;
   }
   llvm_unreachable("Invalid AtomicRMWKind");
 }
@@ -66,7 +66,7 @@ spirv::mapMemorySpaceToVulkanStorageClass(Attribute memorySpaceAttr) {
   // Downstream callers should plug in more specialized ones.
   auto intAttr = memorySpaceAttr.dyn_cast<IntegerAttr>();
   if (!intAttr)
-    return llvm::None;
+    return std::nullopt;
   unsigned memorySpace = intAttr.getInt();

 #define STORAGE_SPACE_MAP_FN(storage, space) \

@@ -78,7 +78,7 @@ spirv::mapMemorySpaceToVulkanStorageClass(Attribute memorySpaceAttr) {
   default:
     break;
   }
-  return llvm::None;
+  return std::nullopt;

 #undef STORAGE_SPACE_MAP_FN
 }

@@ -94,7 +94,7 @@ spirv::mapVulkanStorageClassToMemorySpace(spirv::StorageClass storageClass) {
   default:
     break;
   }
-  return llvm::None;
+  return std::nullopt;

 #undef STORAGE_SPACE_MAP_FN
 }

@@ -120,7 +120,7 @@ spirv::mapMemorySpaceToOpenCLStorageClass(Attribute memorySpaceAttr) {
   // Downstream callers should plug in more specialized ones.
   auto intAttr = memorySpaceAttr.dyn_cast<IntegerAttr>();
   if (!intAttr)
-    return llvm::None;
+    return std::nullopt;
   unsigned memorySpace = intAttr.getInt();

 #define STORAGE_SPACE_MAP_FN(storage, space) \

@@ -132,7 +132,7 @@ spirv::mapMemorySpaceToOpenCLStorageClass(Attribute memorySpaceAttr) {
   default:
     break;
   }
-  return llvm::None;
+  return std::nullopt;

 #undef STORAGE_SPACE_MAP_FN
 }

@@ -148,7 +148,7 @@ spirv::mapOpenCLStorageClassToMemorySpace(spirv::StorageClass storageClass) {
   default:
     break;
   }
-  return llvm::None;
+  return std::nullopt;

 #undef STORAGE_SPACE_MAP_FN
 }

@@ -172,7 +172,7 @@ spirv::MemorySpaceToStorageClassConverter::MemorySpaceToStorageClassConverter(
       LLVM_DEBUG(llvm::dbgs()
                  << "cannot convert " << memRefType
                  << " due to being unable to find memory space in map\n");
-      return llvm::None;
+      return std::nullopt;
     }

     auto storageAttr =
@@ -305,7 +305,7 @@ struct MmaSyncOptoNVVM : public ConvertOpToLLVMPattern<nvgpu::MmaSyncOp> {
           "could not infer the PTX type for the accumulator/result");

     // TODO: add an attribute to the op to customize this behavior.
-    Optional<NVVM::MMAIntOverflow> overflow(llvm::None);
+    Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
     if (aType.getElementType().isa<IntegerType>())
       overflow = NVVM::MMAIntOverflow::satfinite;

@@ -322,7 +322,7 @@ struct MmaSyncOptoNVVM : public ConvertOpToLLVMPattern<nvgpu::MmaSyncOp> {
     Value intrinsicResult = rewriter.create<NVVM::MmaOp>(
         op.getLoc(), intrinsicResTy, matA, matB, matC,
         /*shape=*/gemmShape,
-        /*b1Op=*/llvm::None,
+        /*b1Op=*/std::nullopt,
         /*intOverflow=*/overflow,
         /*multiplicandPtxTypes=*/
        std::array<NVVM::MMATypes, 2>{*ptxTypeA, *ptxTypeB},

@@ -517,7 +517,7 @@ struct NVGPUMmaSparseSyncLowering
       return failure();

     // TODO: add an attribute to the op to customize this behavior.
-    Optional<NVVM::MMAIntOverflow> overflow(llvm::None);
+    Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
     if (aType.getElementType().isa<IntegerType>())
       overflow = NVVM::MMAIntOverflow::satfinite;
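Aside on `Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);` above: direct-initialization from std::nullopt and default construction both yield a disengaged optional, so the explicit spelling is purely documentation. A minimal sketch:

#include <optional>

void demo() {
  std::optional<int> a(std::nullopt); // explicitly empty
  std::optional<int> b;               // default-constructed, equally empty
  // Both are disengaged: !a.has_value() and !b.has_value() hold, and a == b.
}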
@@ -614,7 +614,7 @@ SymbolRefAttr PatternLowering::generateRewriter(
   builder.setInsertionPointToEnd(rewriterModule.getBody());
   auto rewriterFunc = builder.create<pdl_interp::FuncOp>(
       pattern.getLoc(), "pdl_generated_rewriter",
-      builder.getFunctionType(llvm::None, llvm::None));
+      builder.getFunctionType(std::nullopt, std::nullopt));
   rewriterSymbolTable.insert(rewriterFunc);

   // Generate the rewriter function body.

@@ -681,7 +681,7 @@ SymbolRefAttr PatternLowering::generateRewriter(
   // Update the signature of the rewrite function.
   rewriterFunc.setType(builder.getFunctionType(
       llvm::to_vector<8>(rewriterFunc.front().getArgumentTypes()),
-      /*results=*/llvm::None));
+      /*results=*/std::nullopt));

   builder.create<pdl_interp::FinalizeOp>(rewriter.getLoc());
   return SymbolRefAttr::get(

@@ -968,8 +968,8 @@ void PDLToPDLInterpPass::runOnOperation() {
   auto matcherFunc = builder.create<pdl_interp::FuncOp>(
       module.getLoc(), pdl_interp::PDLInterpDialect::getMatcherFunctionName(),
       builder.getFunctionType(builder.getType<pdl::OperationType>(),
-                              /*results=*/llvm::None),
-      /*attrs=*/llvm::None);
+                              /*results=*/std::nullopt),
+      /*attrs=*/std::nullopt);

   // Create a nested module to hold the functions invoked for rewriting the IR
   // after a successful match.

@@ -600,7 +600,7 @@ public:
     return OperandGroupPosition::get(uniquer, p, group, isVariadic);
   }
   Position *getAllOperands(OperationPosition *p) {
-    return getOperandGroup(p, /*group=*/llvm::None, /*isVariadic=*/true);
+    return getOperandGroup(p, /*group=*/std::nullopt, /*isVariadic=*/true);
   }

   /// Returns a result position for a result of the given operation.

@@ -614,7 +614,7 @@ public:
     return ResultGroupPosition::get(uniquer, p, group, isVariadic);
   }
   Position *getAllResults(OperationPosition *p) {
-    return getResultGroup(p, /*group=*/llvm::None, /*isVariadic=*/true);
+    return getResultGroup(p, /*group=*/std::nullopt, /*isVariadic=*/true);
   }

   /// Returns a type position for the given entity.

@@ -110,7 +110,7 @@ static void getTreePredicates(std::vector<PositionalPredicate> &predList,
                               Value val, PredicateBuilder &builder,
                               DenseMap<Value, Position *> &inputs,
                               OperationPosition *pos,
-                              Optional<unsigned> ignoreOperand = llvm::None) {
+                              Optional<unsigned> ignoreOperand = std::nullopt) {
   assert(val.getType().isa<pdl::OperationType>() && "expected operation");
   pdl::OperationOp op = cast<pdl::OperationOp>(val.getDefiningOp());
   OperationPosition *opPos = cast<OperationPosition>(pos);

@@ -458,7 +458,7 @@ static void buildCostGraph(ArrayRef<Value> roots, RootOrderingGraph &graph,
     // For those, the index is empty.
     if (operands.size() == 1 &&
         operands[0].getType().isa<pdl::RangeType>()) {
-      toVisit.emplace(operands[0], entry.value, llvm::None,
+      toVisit.emplace(operands[0], entry.value, std::nullopt,
                       entry.depth + 1);
       return;
     }
@@ -192,7 +192,7 @@ AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
     Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
     Value upperBound = getOrEmitUpperBound(currentLoop, builder);
     if (!lowerBound || !upperBound) {
-      return llvm::None;
+      return std::nullopt;
     }

     Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),

@@ -248,7 +248,7 @@ void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
   Location terminatorLoc = terminator.getLoc();
   terminator.erase();
   builder.setInsertionPointToEnd(innermostForOp.getBody());
-  builder.create<gpu::TerminatorOp>(terminatorLoc, llvm::None);
+  builder.create<gpu::TerminatorOp>(terminatorLoc, std::nullopt);
   launchOp.getBody().front().getOperations().splice(
       launchOp.getBody().front().begin(),
       innermostForOp.getBody()->getOperations());

@@ -412,7 +412,7 @@ WhileOpConversion::matchAndRewrite(scf::WhileOp whileOp, OpAdaptor adaptor,

   rewriter.setInsertionPointToEnd(&beforeBlock);
   rewriter.replaceOpWithNewOp<spirv::BranchConditionalOp>(
-      cond, conditionVal, &afterBlock, condArgs, &mergeBlock, llvm::None);
+      cond, conditionVal, &afterBlock, condArgs, &mergeBlock, std::nullopt);

   // Convert the scf.yield op to a branch back to the header block.
   rewriter.setInsertionPointToEnd(&afterBlock);

@@ -188,7 +188,7 @@ static Optional<Type>
 convertStructTypeWithOffset(spirv::StructType type,
                             LLVMTypeConverter &converter) {
   if (type != VulkanLayoutUtils::decorateType(type))
-    return llvm::None;
+    return std::nullopt;

   auto elementsVector = llvm::to_vector<8>(
       llvm::map_range(type.getElementTypes(), [&](Type elementType) {

@@ -253,7 +253,7 @@ static Optional<Type> convertArrayType(spirv::ArrayType type,
   Type elementType = type.getElementType();
   auto sizeInBytes = elementType.cast<spirv::SPIRVType>().getSizeInBytes();
   if (stride != 0 && (!sizeInBytes || *sizeInBytes != stride))
-    return llvm::None;
+    return std::nullopt;

   auto llvmElementType = converter.convertType(elementType);
   unsigned numElements = type.getNumElements();

@@ -274,7 +274,7 @@ static Type convertPointerType(spirv::PointerType type,
 static Optional<Type> convertRuntimeArrayType(spirv::RuntimeArrayType type,
                                               TypeConverter &converter) {
   if (type.getArrayStride() != 0)
-    return llvm::None;
+    return std::nullopt;
   auto elementType = converter.convertType(type.getElementType());
   return LLVM::LLVMArrayType::get(elementType, 0);
 }

@@ -286,7 +286,7 @@ static Optional<Type> convertStructType(spirv::StructType type,
   SmallVector<spirv::StructType::MemberDecorationInfo, 4> memberDecorations;
   type.getMemberDecorations(memberDecorations);
   if (!memberDecorations.empty())
-    return llvm::None;
+    return std::nullopt;
   if (type.hasOffset())
     return convertStructTypeWithOffset(type, converter);
   return convertStructTypePacked(type, converter);

@@ -812,7 +812,7 @@ public:
                   ConversionPatternRewriter &rewriter) const override {
     if (callOp.getNumResults() == 0) {
       rewriter.replaceOpWithNewOp<LLVM::CallOp>(
-          callOp, llvm::None, adaptor.getOperands(), callOp->getAttrs());
+          callOp, std::nullopt, adaptor.getOperands(), callOp->getAttrs());
       return success();
     }
@@ -244,7 +244,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
         rewriter.create<arith::ShRSIOp>(loc, resultTypes, args[0], subtract)
             ->getResults();
     auto truncated =
-        rewriter.create<arith::TruncIOp>(loc, i1Ty, shifted, mlir::None);
+        rewriter.create<arith::TruncIOp>(loc, i1Ty, shifted, std::nullopt);
     auto isInputOdd =
         rewriter.create<arith::AndIOp>(loc, i1Ty, truncated, i1one);

@@ -428,20 +428,21 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
       return args.front();

     if (srcTy.isa<FloatType>() && dstTy.isa<FloatType>() && bitExtend)
-      return rewriter.create<arith::ExtFOp>(loc, resultTypes, args, mlir::None);
+      return rewriter.create<arith::ExtFOp>(loc, resultTypes, args,
+                                            std::nullopt);

     if (srcTy.isa<FloatType>() && dstTy.isa<FloatType>() && !bitExtend)
       return rewriter.create<arith::TruncFOp>(loc, resultTypes, args,
-                                              mlir::None);
+                                              std::nullopt);

     // 1-bit integers need to be treated as signless.
     if (srcTy.isInteger(1) && arith::UIToFPOp::areCastCompatible(srcTy, dstTy))
       return rewriter.create<arith::UIToFPOp>(loc, resultTypes, args,
-                                              mlir::None);
+                                              std::nullopt);

     if (srcTy.isInteger(1) && dstTy.isa<IntegerType>() && bitExtend)
       return rewriter.create<arith::ExtUIOp>(loc, resultTypes, args,
-                                             mlir::None);
+                                             std::nullopt);

     // Unsigned integers need an unrealized cast so that they can be passed
     // to UIToFP.

@@ -459,7 +460,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
     // All other si-to-fp conversions should be handled by SIToFP.
     if (arith::SIToFPOp::areCastCompatible(srcTy, dstTy))
       return rewriter.create<arith::SIToFPOp>(loc, resultTypes, args,
-                                              mlir::None);
+                                              std::nullopt);

     // Casting to boolean, floats need to only be checked as not-equal to zero.
     if (srcTy.isa<FloatType>() && dstTy.isInteger(1)) {

@@ -508,7 +509,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,

     if (srcTy.isa<IntegerType>() && dstTy.isa<IntegerType>() && bitExtend)
       return rewriter.create<arith::ExtSIOp>(loc, resultTypes, args,
-                                             mlir::None);
+                                             std::nullopt);

     if (srcTy.isa<IntegerType>() && dstTy.isa<IntegerType>() && !bitExtend) {
       auto intMin = rewriter.create<arith::ConstantIntOp>(
@@ -119,10 +119,10 @@ getMemrefConstantHorizontalStride(ShapedType type) {
   SmallVector<int64_t, 2> strides;
   if (failed(getStridesAndOffset(memrefType, strides, offset)) ||
       strides.back() != 1)
-    return llvm::None;
+    return std::nullopt;
   int64_t stride = strides[strides.size() - 2];
   if (stride == ShapedType::kDynamic)
-    return llvm::None;
+    return std::nullopt;
   return stride;
 }

@@ -197,7 +197,7 @@ convertElementwiseOpToMMA(Operation *op) {
     return gpu::MMAElementwiseOp::MINF;
   if (isa<arith::DivFOp>(op))
     return gpu::MMAElementwiseOp::DIVF;
-  return llvm::None;
+  return std::nullopt;
 }

 /// Return true if the op is supported as elementwise op on MMAMatrix type.

@@ -948,9 +948,9 @@ computeContiguousStrides(MemRefType memRefType) {
   int64_t offset;
   SmallVector<int64_t, 4> strides;
   if (failed(getStridesAndOffset(memRefType, strides, offset)))
-    return None;
+    return std::nullopt;
   if (!strides.empty() && strides.back() != 1)
-    return None;
+    return std::nullopt;
   // If no layout or identity layout, this is contiguous by definition.
   if (memRefType.getLayout().isIdentity())
     return strides;

@@ -964,9 +964,9 @@ computeContiguousStrides(MemRefType memRefType) {
     if (ShapedType::isDynamic(sizes[index + 1]) ||
         ShapedType::isDynamic(strides[index]) ||
         ShapedType::isDynamic(strides[index + 1]))
-      return None;
+      return std::nullopt;
     if (strides[index] != strides[index + 1] * sizes[index + 1])
-      return None;
+      return std::nullopt;
   }
   return strides;
 }

@@ -63,7 +63,7 @@ static Optional<int64_t> unpackedDim(OpTy xferOp) {
   }
   assert(xferOp.isBroadcastDim(0) &&
          "Expected AffineDimExpr or AffineConstantExpr");
-  return None;
+  return std::nullopt;
 }

 /// Compute the permutation map for the new (N-1)-D vector transfer op. This

@@ -1114,7 +1114,7 @@ get1dMemrefIndices(OpBuilder &b, OpTy xferOp, Value iv,

   assert(xferOp.isBroadcastDim(0) &&
          "Expected AffineDimExpr or AffineConstantExpr");
-  return None;
+  return std::nullopt;
 }

 /// Codegen strategy for TransferOp1dConversion, depending on the

@@ -70,10 +70,10 @@ LogicalResult RawBufferAtomicFaddOp::verify() {
 static Optional<uint32_t> getConstantUint32(Value v) {
   APInt cst;
   if (!v.getType().isInteger(32))
-    return None;
+    return std::nullopt;
   if (matchPattern(v, m_ConstantInt(&cst)))
     return cst.getZExtValue();
-  return None;
+  return std::nullopt;
 }

 template <typename OpType>

@@ -67,7 +67,7 @@ static Value getSupportedReduction(AffineForOp forOp, unsigned pos,
       .Default([](Operation *) -> Optional<arith::AtomicRMWKind> {
         // TODO: AtomicRMW supports other kinds of reductions this is
         // currently not detecting, add those when the need arises.
-        return llvm::None;
+        return std::nullopt;
       });
   if (!maybeKind)
     return nullptr;
@@ -157,7 +157,7 @@ FlatAffineValueConstraints::FlatAffineValueConstraints(IntegerSet set)
                         /*numLocals=*/0)) {

   // Resize values.
-  values.resize(getNumDimAndSymbolVars(), None);
+  values.resize(getNumDimAndSymbolVars(), std::nullopt);

   // Flatten expressions and add them to the constraint system.
   std::vector<SmallVector<int64_t, 8>> flatExprs;

@@ -294,7 +294,7 @@ unsigned FlatAffineValueConstraints::insertVar(VarKind kind, unsigned pos,
   unsigned absolutePos = IntegerPolyhedron::insertVar(kind, pos, num);

   if (kind != VarKind::Local) {
-    values.insert(values.begin() + absolutePos, num, None);
+    values.insert(values.begin() + absolutePos, num, std::nullopt);
     assert(values.size() == getNumDimAndSymbolVars());
   }

@@ -312,7 +312,7 @@ unsigned FlatAffineValueConstraints::insertVar(VarKind kind, unsigned pos,
   // If a Value is provided, insert it; otherwise use None.
   for (unsigned i = 0; i < num; ++i)
     values.insert(values.begin() + absolutePos + i,
-                  vals[i] ? Optional<Value>(vals[i]) : None);
+                  vals[i] ? Optional<Value>(vals[i]) : std::nullopt);

   assert(values.size() == getNumDimAndSymbolVars());
   return absolutePos;

@@ -1351,9 +1351,9 @@ void FlatAffineValueConstraints::swapVar(unsigned posA, unsigned posB) {

   // Treat value of a local variable as None.
   if (getVarKindAt(posA) == VarKind::Local)
-    values[posB] = None;
+    values[posB] = std::nullopt;
   else if (getVarKindAt(posB) == VarKind::Local)
-    values[posA] = None;
+    values[posA] = std::nullopt;
   else
     std::swap(values[posA], values[posB]);
 }

@@ -1392,7 +1392,7 @@ void FlatAffineValueConstraints::clearAndCopyFrom(
   } else {
     *static_cast<IntegerRelation *>(this) = other;
     values.clear();
-    values.resize(getNumDimAndSymbolVars(), None);
+    values.resize(getNumDimAndSymbolVars(), std::nullopt);
   }
 }
@@ -87,7 +87,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
   getTripCountMapAndOperands(forOp, &map, &operands);

   if (!map)
-    return None;
+    return std::nullopt;

   // Take the min if all trip counts are constant.
   Optional<uint64_t> tripCount;

@@ -99,7 +99,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
       else
         tripCount = constExpr.getValue();
     } else
-      return None;
+      return std::nullopt;
   }
   return tripCount;
 }

@@ -164,19 +164,19 @@ Optional<bool> ComputationSliceState::isSliceMaximalFastCheck() const {
       // Make sure we skip those cases by checking that the lb result is not
       // just a constant.
       lbMap.getResult(0).isa<AffineConstantExpr>())
-    return llvm::None;
+    return std::nullopt;

   // Limited support: we expect the lb result to be just a loop dimension for
   // now.
   AffineDimExpr result = lbMap.getResult(0).dyn_cast<AffineDimExpr>();
   if (!result)
-    return llvm::None;
+    return std::nullopt;

   // Retrieve dst loop bounds.
   AffineForOp dstLoop =
       getForInductionVarOwner(lbOperands[i][result.getPosition()]);
   if (!dstLoop)
-    return llvm::None;
+    return std::nullopt;
   AffineMap dstLbMap = dstLoop.getLowerBoundMap();
   AffineMap dstUbMap = dstLoop.getUpperBoundMap();

@@ -190,7 +190,7 @@ Optional<bool> ComputationSliceState::isSliceMaximalFastCheck() const {
   // constant component per bound for now.
   if (srcLbMap.getNumResults() != 1 || srcUbMap.getNumResults() != 1 ||
       dstLbMap.getNumResults() != 1 || dstUbMap.getNumResults() != 1)
-    return llvm::None;
+    return std::nullopt;

   AffineExpr srcLbResult = srcLbMap.getResult(0);
   AffineExpr dstLbResult = dstLbMap.getResult(0);

@@ -200,7 +200,7 @@ Optional<bool> ComputationSliceState::isSliceMaximalFastCheck() const {
       !srcUbResult.isa<AffineConstantExpr>() ||
       !dstLbResult.isa<AffineConstantExpr>() ||
       !dstUbResult.isa<AffineConstantExpr>())
-    return llvm::None;
+    return std::nullopt;

   // Check if src and dst loop bounds are the same. If not, we can guarantee
   // that the slice is not maximal.

@@ -235,20 +235,20 @@ Optional<bool> ComputationSliceState::isSliceValid() {
   // TODO: Store the source's domain to avoid computation at each depth.
   if (failed(getSourceAsConstraints(srcConstraints))) {
     LLVM_DEBUG(llvm::dbgs() << "Unable to compute source's domain\n");
-    return llvm::None;
+    return std::nullopt;
   }
   // As the set difference utility currently cannot handle symbols in its
   // operands, validity of the slice cannot be determined.
   if (srcConstraints.getNumSymbolVars() > 0) {
     LLVM_DEBUG(llvm::dbgs() << "Cannot handle symbols in source domain\n");
-    return llvm::None;
+    return std::nullopt;
   }
   // TODO: Handle local vars in the source domains while using the 'projectOut'
   // utility below. Currently, aligning is not done assuming that there will be
   // no local vars in the source domain.
   if (srcConstraints.getNumLocalVars() != 0) {
     LLVM_DEBUG(llvm::dbgs() << "Cannot handle locals in source domain\n");
-    return llvm::None;
+    return std::nullopt;
   }

   // Create constraints for the slice loop nest that would be created if the

@@ -256,7 +256,7 @@ Optional<bool> ComputationSliceState::isSliceValid() {
   FlatAffineValueConstraints sliceConstraints;
   if (failed(getAsConstraints(&sliceConstraints))) {
     LLVM_DEBUG(llvm::dbgs() << "Unable to compute slice's domain\n");
-    return llvm::None;
+    return std::nullopt;
   }

   // Projecting out every dimension other than the 'ivs' to express slice's

@@ -300,7 +300,7 @@ Optional<bool> ComputationSliceState::isMaximal() const {
     AffineForOp loop = getForInductionVarOwner(iv);
     assert(loop && "Expected affine for");
     if (failed(srcConstraints.addAffineForOpDomain(loop)))
-      return llvm::None;
+      return std::nullopt;
   }

   // Create constraints for the slice using the dst loop nest information. We

@@ -320,12 +320,12 @@ Optional<bool> ComputationSliceState::isMaximal() const {
                           /*numLocals=*/0, consumerIVs);

   if (failed(sliceConstraints.addDomainFromSliceMaps(lbs, ubs, lbOperands[0])))
-    return llvm::None;
+    return std::nullopt;

   if (srcConstraints.getNumDimVars() != sliceConstraints.getNumDimVars())
     // Constraint dims are different. The integer set difference can't be
     // computed so we don't know if the slice is maximal.
-    return llvm::None;
+    return std::nullopt;

   // Compute the difference between the src loop nest and the slice integer
   // sets.

@@ -381,7 +381,7 @@ Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
     // memref's dim size if the latter has a constant size along this dim.
     auto dimSize = memRefType.getDimSize(d);
     if (dimSize == ShapedType::kDynamic)
-      return None;
+      return std::nullopt;
     diffConstant = dimSize;
     // Lower bound becomes 0.
     lb.resize(cstWithShapeBounds.getNumSymbolVars() + 1, 0);

@@ -629,7 +629,7 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
   Optional<int64_t> numElements = getConstantBoundingSizeAndShape();
   if (!numElements) {
     LLVM_DEBUG(llvm::dbgs() << "Dynamic shapes not yet supported\n");
-    return None;
+    return std::nullopt;
   }
   return getMemRefEltSizeInBytes(memRefType) * *numElements;
 }

@@ -640,10 +640,10 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
 // TODO: improve/complete this when we have target data.
 Optional<uint64_t> mlir::getMemRefSizeInBytes(MemRefType memRefType) {
   if (!memRefType.hasStaticShape())
-    return None;
+    return std::nullopt;
   auto elementType = memRefType.getElementType();
   if (!elementType.isIntOrFloat() && !elementType.isa<VectorType>())
-    return None;
+    return std::nullopt;

   uint64_t sizeInBytes = getMemRefEltSizeInBytes(memRefType);
   for (unsigned i = 0, e = memRefType.getRank(); i < e; i++) {

@@ -979,7 +979,7 @@ static Optional<uint64_t> getConstDifference(AffineMap lbMap, AffineMap ubMap) {
                                          lbMap.getNumSymbols());
   auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>();
   if (!cExpr)
-    return None;
+    return std::nullopt;
   return cExpr.getValue();
 }

@@ -1312,13 +1312,13 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,
     return WalkResult::advance();
   });
   if (result.wasInterrupted())
-    return None;
+    return std::nullopt;

   int64_t totalSizeInBytes = 0;
   for (const auto &region : regions) {
     Optional<int64_t> size = region.second->getRegionSize();
     if (!size.has_value())
-      return None;
+      return std::nullopt;
     totalSizeInBytes += size.value();
   }
   return totalSizeInBytes;
@@ -2007,7 +2007,7 @@ namespace {
 static Optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
   int64_t step = forOp.getStep();
   if (!forOp.hasConstantBounds() || step <= 0)
-    return None;
+    return std::nullopt;
   int64_t lb = forOp.getConstantLowerBound();
   int64_t ub = forOp.getConstantUpperBound();
   return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step;

@@ -2263,7 +2263,7 @@ Optional<Value> AffineForOp::getSingleInductionVar() {

 Optional<OpFoldResult> AffineForOp::getSingleLowerBound() {
   if (!hasConstantLowerBound())
-    return llvm::None;
+    return std::nullopt;
   OpBuilder b(getContext());
   return OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound()));
 }

@@ -2275,7 +2275,7 @@ Optional<OpFoldResult> AffineForOp::getSingleStep() {

 Optional<OpFoldResult> AffineForOp::getSingleUpperBound() {
   if (!hasConstantUpperBound())
-    return llvm::None;
+    return std::nullopt;
   OpBuilder b(getContext());
   return OpFoldResult(b.getI64IntegerAttr(getConstantUpperBound()));
 }

@@ -2365,8 +2365,8 @@ static AffineForOp
 buildAffineLoopFromConstants(OpBuilder &builder, Location loc, int64_t lb,
                              int64_t ub, int64_t step,
                              AffineForOp::BodyBuilderFn bodyBuilderFn) {
-  return builder.create<AffineForOp>(loc, lb, ub, step, /*iterArgs=*/llvm::None,
-                                     bodyBuilderFn);
+  return builder.create<AffineForOp>(loc, lb, ub, step,
+                                     /*iterArgs=*/std::nullopt, bodyBuilderFn);
 }

 /// Creates an affine loop from the bounds that may or may not be constants.

@@ -2381,7 +2381,7 @@ buildAffineLoopFromValues(OpBuilder &builder, Location loc, Value lb, Value ub,
                                        ubConst.value(), step, bodyBuilderFn);
   return builder.create<AffineForOp>(loc, lb, builder.getDimIdentityMap(), ub,
                                      builder.getDimIdentityMap(), step,
-                                     /*iterArgs=*/llvm::None, bodyBuilderFn);
+                                     /*iterArgs=*/std::nullopt, bodyBuilderFn);
 }

 void mlir::buildAffineLoopNest(

@@ -3551,7 +3551,7 @@ AffineValueMap AffineParallelOp::getUpperBoundsValueMap() {

 Optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
   if (hasMinMaxBounds())
-    return llvm::None;
+    return std::nullopt;

   // Try to convert all the ranges to constant expressions.
   SmallVector<int64_t, 8> out;

@@ -3563,7 +3563,7 @@ Optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
     auto expr = rangesValueMap.getResult(i);
     auto cst = expr.dyn_cast<AffineConstantExpr>();
     if (!cst)
-      return llvm::None;
+      return std::nullopt;
     out.push_back(cst.getValue());
   }
   return out;
@@ -141,7 +141,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
     if ((forOp = dyn_cast<AffineForOp>(&*it)) && copyNests.count(forOp) == 0) {
       // Perform the copying up until this 'for' op first.
       (void)affineDataCopyGenerate(/*begin=*/curBegin, /*end=*/it, copyOptions,
-                                   /*filterMemRef=*/llvm::None, copyNests);
+                                   /*filterMemRef=*/std::nullopt, copyNests);

       // Returns true if the footprint is known to exceed capacity.
       auto exceedsCapacity = [&](AffineForOp forOp) {

@@ -176,7 +176,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
       // loop's footprint fits.
       (void)affineDataCopyGenerate(/*begin=*/it, /*end=*/std::next(it),
                                    copyOptions,
-                                   /*filterMemRef=*/llvm::None, copyNests);
+                                   /*filterMemRef=*/std::nullopt, copyNests);
     }
     // Get to the next load or store op after 'forOp'.
     curBegin = std::find_if(std::next(it), block->end(), [&](Operation &op) {

@@ -200,7 +200,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
     // Exclude the affine.yield - hence, the std::prev.
     (void)affineDataCopyGenerate(/*begin=*/curBegin,
                                  /*end=*/std::prev(block->end()), copyOptions,
-                                 /*filterMemRef=*/llvm::None, copyNests);
+                                 /*filterMemRef=*/std::nullopt, copyNests);
   }
 }

@@ -442,7 +442,7 @@ public:
   for (Block::iterator it = std::next(Block::iterator(srcNodeInst));
        it != Block::iterator(dstNodeInst); ++it) {
     Operation *op = &(*it);
-    if (srcDepInsts.count(op) > 0 && firstSrcDepPos == None)
+    if (srcDepInsts.count(op) > 0 && firstSrcDepPos == std::nullopt)
      firstSrcDepPos = pos;
    if (dstDepInsts.count(op) > 0)
      lastDstDepPos = pos;
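On `firstSrcDepPos == std::nullopt` above: comparing an optional against std::nullopt is well-defined and equivalent to asking !opt.has_value(), so the mechanical rewrite preserves behavior. A one-liner sketch:

#include <optional>

// True when no position has been recorded yet; same as !pos.has_value().
bool isUnset(std::optional<unsigned> pos) { return pos == std::nullopt; }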
@@ -51,8 +51,8 @@ struct LoopUnroll : public impl::AffineLoopUnrollBase<LoopUnroll> {
       = default;
   explicit LoopUnroll(
-      Optional<unsigned> unrollFactor = None, bool unrollUpToFactor = false,
-      bool unrollFull = false,
+      Optional<unsigned> unrollFactor = std::nullopt,
+      bool unrollUpToFactor = false, bool unrollFull = false,
       const std::function<unsigned(AffineForOp)> &getUnrollFactor = nullptr)
       : getUnrollFactor(getUnrollFactor) {
     if (unrollFactor)

@@ -145,6 +145,6 @@ std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
     int unrollFactor, bool unrollUpToFactor, bool unrollFull,
     const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
   return std::make_unique<LoopUnroll>(
-      unrollFactor == -1 ? None : Optional<unsigned>(unrollFactor),
+      unrollFactor == -1 ? std::nullopt : Optional<unsigned>(unrollFactor),
       unrollUpToFactor, unrollFull, getUnrollFactor);
 }

@@ -61,7 +61,7 @@ namespace {
 /// outer loop in a Function.
 struct LoopUnrollAndJam
     : public impl::AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
-  explicit LoopUnrollAndJam(Optional<unsigned> unrollJamFactor = None) {
+  explicit LoopUnrollAndJam(Optional<unsigned> unrollJamFactor = std::nullopt) {
     if (unrollJamFactor)
       this->unrollJamFactor = *unrollJamFactor;
   }

@@ -73,7 +73,8 @@ struct LoopUnrollAndJam
 std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
   return std::make_unique<LoopUnrollAndJam>(
-      unrollJamFactor == -1 ? None : Optional<unsigned>(unrollJamFactor));
+      unrollJamFactor == -1 ? std::nullopt
+                            : Optional<unsigned>(unrollJamFactor));
 }

 void LoopUnrollAndJam::runOnOperation() {
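The ternaries above keep Optional<unsigned>(...) spelled out on one arm because the conditional operator needs a common type for its two arms, and std::nullopt_t alone does not convert to or from an integer. A minimal sketch of the same shape (pickFactor is hypothetical):

#include <optional>

std::optional<unsigned> pickFactor(int requested) {
  // Naming the optional type on one arm gives the ternary a common type;
  // `requested == -1 ? std::nullopt : requested` would not compile.
  return requested == -1 ? std::nullopt
                         : std::optional<unsigned>(requested);
}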
@@ -599,7 +599,7 @@ makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
                For(isVectorizableLoopPtrFactory(parallelLoops, d1),
                    For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
   default: {
-    return llvm::None;
+    return std::nullopt;
   }
   }
 }

@@ -235,7 +235,7 @@ Optional<SmallVector<Value, 8>> mlir::expandAffineMap(OpBuilder &builder,
       }));
   if (llvm::all_of(expanded, [](Value v) { return v; }))
     return expanded;
-  return None;
+  return std::nullopt;
 }

 /// Promotes the `then` or the `else` block of `ifOp` (depending on whether

@@ -225,7 +225,7 @@ void arith::AddIOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
 Optional<SmallVector<int64_t, 4>> arith::AddUICarryOp::getShapeForUnroll() {
   if (auto vt = getType(0).dyn_cast<VectorType>())
     return llvm::to_vector<4>(vt.getShape());
-  return None;
+  return std::nullopt;
 }

 // Returns the carry bit, assuming that `sum` is the result of addition of

@@ -1509,7 +1509,7 @@ static Optional<int64_t> getIntegerWidth(Type t) {
   if (auto vectorIntType = t.dyn_cast<VectorType>()) {
     return vectorIntType.getElementType().cast<IntegerType>().getWidth();
   }
-  return llvm::None;
+  return std::nullopt;
 }

 OpFoldResult arith::CmpIOp::fold(ArrayRef<Attribute> operands) {

@@ -1064,7 +1064,7 @@ arith::WideIntEmulationConverter::WideIntEmulationConverter(
     if (width == 2 * maxIntWidth)
       return VectorType::get(2, IntegerType::get(ty.getContext(), maxIntWidth));

-    return None;
+    return std::nullopt;
   });

   // Vector case.

@@ -1085,7 +1085,7 @@ arith::WideIntEmulationConverter::WideIntEmulationConverter(
                              IntegerType::get(ty.getContext(), maxIntWidth));
     }

-    return None;
+    return std::nullopt;
   });

   // Function case.

@@ -1094,11 +1094,11 @@ arith::WideIntEmulationConverter::WideIntEmulationConverter(
     // (i2N, i2N) -> i2N --> (vector<2xiN>, vector<2xiN>) -> vector<2xiN>
     SmallVector<Type> inputs;
     if (failed(convertTypes(ty.getInputs(), inputs)))
-      return None;
+      return std::nullopt;

     SmallVector<Type> results;
     if (failed(convertTypes(ty.getResults(), results)))
-      return None;
+      return std::nullopt;

     return FunctionType::get(ty.getContext(), inputs, results);
   });
@@ -342,7 +342,7 @@ void FuncOp::build(OpBuilder &builder, OperationState &state, StringRef name,
     return;
   assert(type.getNumInputs() == argAttrs.size());
   function_interface_impl::addArgAndResultAttrs(builder, state, argAttrs,
-                                                /*resultAttrs=*/llvm::None);
+                                                /*resultAttrs=*/std::nullopt);
 }

 ParseResult FuncOp::parse(OpAsmParser &parser, OperationState &result) {

@@ -223,7 +223,7 @@ static CoroMachinery setupCoroMachinery(func::FuncOp func) {
   machinery.returnValues = retValues;
   machinery.coroHandle = coroHdlOp.getHandle();
   machinery.entry = entryBlock;
-  machinery.setError = None; // created lazily only if needed
+  machinery.setError = std::nullopt; // created lazily only if needed
   machinery.cleanup = cleanupBlock;
   machinery.suspend = suspendBlock;
   return machinery;

@@ -371,7 +371,8 @@ private:
   // parent operation. In this case, we have to introduce an additional clone
   // for buffer that is passed to the argument.
   SmallVector<RegionSuccessor, 2> successorRegions;
-  regionInterface.getSuccessorRegions(/*index=*/llvm::None, successorRegions);
+  regionInterface.getSuccessorRegions(/*index=*/std::nullopt,
+                                      successorRegions);
   auto *it =
       llvm::find_if(successorRegions, [&](RegionSuccessor &successorRegion) {
         return successorRegion.getSuccessor() == argRegion;

@@ -131,7 +131,7 @@ bool BufferPlacementTransformationBase::isLoop(Operation *op) {

   // Start with all entry regions and test whether they induce a loop.
   SmallVector<RegionSuccessor, 2> successorRegions;
-  regionInterface.getSuccessorRegions(/*index=*/llvm::None, successorRegions);
+  regionInterface.getSuccessorRegions(/*index=*/std::nullopt, successorRegions);
   for (RegionSuccessor &regionEntry : successorRegions) {
     if (recurse(regionEntry.getSuccessor()))
       return true;

@@ -82,7 +82,8 @@ void BufferViewFlowAnalysis::build(Operation *op) {
   op->walk([&](RegionBranchOpInterface regionInterface) {
     // Extract all entry regions and wire all initial entry successor inputs.
     SmallVector<RegionSuccessor, 2> entrySuccessors;
-    regionInterface.getSuccessorRegions(/*index=*/llvm::None, entrySuccessors);
+    regionInterface.getSuccessorRegions(/*index=*/std::nullopt,
+                                        entrySuccessors);
     for (RegionSuccessor &entrySuccessor : entrySuccessors) {
       // Wire the entry region's successor arguments with the initial
       // successor inputs.

@@ -210,7 +210,7 @@ struct OneShotBufferizePass
     opt.functionBoundaryTypeConversion =
         parseLayoutMapOption(functionBoundaryTypeConversion);
     if (mustInferMemorySpace)
-      opt.defaultMemorySpace = None;
+      opt.defaultMemorySpace = std::nullopt;
     opt.printConflicts = printConflicts;
     opt.testAnalysisOnly = testAnalysisOnly;
     opt.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries;

@@ -132,12 +132,12 @@ static Optional<int64_t> getEquivalentFuncArgIdx(FuncOp funcOp,
   auto funcOpIt = state.equivalentFuncArgs.find(funcOp);
   if (funcOpIt == state.equivalentFuncArgs.end())
     // No equivalence info stored for funcOp.
-    return None;
+    return std::nullopt;

   auto retValIt = funcOpIt->getSecond().find(returnValIdx);
   if (retValIt == funcOpIt->getSecond().end())
     // Return value has no equivalent bbArg.
-    return None;
+    return std::nullopt;

   return retValIt->getSecond();
 }

@@ -273,7 +273,7 @@ struct CallOpInterface
   SmallVector<Value> replacementValues(numResults, Value());
   // For non-tensor results: A mapping from return val indices of the old
   // CallOp to return val indices of the bufferized CallOp.
-  SmallVector<Optional<unsigned>> retValMapping(numResults, None);
+  SmallVector<Optional<unsigned>> retValMapping(numResults, std::nullopt);
   // Operands of the bufferized CallOp.
   SmallVector<Value> newOperands(numOperands, Value());

@@ -480,7 +480,7 @@ bool canUseOpDominance(const DenseSet<OpOperand *> &usesRead,
                        const DenseSet<OpOperand *> &usesWrite,
                        const AnalysisState &state) {
   const BufferizationOptions &options = state.getOptions();
-  Optional<Region *> commonEnclosingRegion = None;
+  Optional<Region *> commonEnclosingRegion = std::nullopt;

   // In case of a write, take the region in which the write takes place.
   for (OpOperand *uWrite : usesWrite) {