build: update llvm tag to 7ccbb4df (#1736)

Summary of changes:

 - LLVM now includes <optional> instead of "llvm/ADT/Optional.h" in most
   (although not all) places
   (https://reviews.llvm.org/rG541ef3d61e9341cd38420c0dbca9250c4d0ea04c).
   This patch replaces the affected instances of `llvm::Optional` with
   `std::optional` (see the first sketch below).

 - In the uses of `llvm::Optional` that remain, `llvm::Optional::value()`
   is deprecated, so this patch replaces those calls with a dereference
   (see the second sketch below).
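
For illustration only, a minimal sketch of the first change; `findConstantDim`
is a hypothetical helper, not an API touched by this patch:

```cpp
#include <cstdint>
#include <iostream>
#include <optional> // was: #include "llvm/ADT/Optional.h"

// Hypothetical helper, not from this patch: returns a dim if it is known.
// Before: llvm::Optional<int64_t> findConstantDim(bool known);
std::optional<int64_t> findConstantDim(bool known) {
  if (!known)
    return std::nullopt; // spelled llvm::None in the old API
  return 42;
}

int main() {
  if (auto dim = findConstantDim(true))
    std::cout << "dim = " << *dim << "\n";
}
```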
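
And a sketch of the second change; `std::optional` stands in here for one of
the remaining `llvm::Optional` values, whose `value()` accessor is deprecated
at this LLVM revision:

```cpp
#include <iostream>
#include <optional>

int main() {
  // Stands in for one of the remaining llvm::Optional values in the tree.
  std::optional<unsigned> rank = 3;
  if (rank.has_value()) {
    // Before: unsigned r = rank.value(); // deprecated on llvm::Optional
    // After: a plain dereference, safe because has_value() was checked.
    unsigned r = *rank;
    std::cout << "rank = " << r << "\n";
  }
}
```

This mirrors the `if (!maybeRank) ... unsigned rank = *maybeRank;` pattern
visible throughout the diff below.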
Tanyo Kwok 2022-12-20 18:17:27 +08:00 committed by GitHub
parent f6b6069a34
commit 577e38da58
31 changed files with 187 additions and 186 deletions

@@ -1 +1 @@
Subproject commit 3a020527c2af10741b12e756de45bd6f774885a4
Subproject commit 7ccbb4dff10efe6c26219204e361ddb0264938b8

externals/mlir-hlo (vendored)

@@ -1 +1 @@
Subproject commit 8df20065b22be628f2d365c387200df7d02b80c1
Subproject commit 8c703fabd60d4447bc86f432446e9ad0eacab600


@@ -17,43 +17,43 @@ namespace mlir {
namespace tosa {
// Lowers ReduceAll to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceAllOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);
// Lowers ReduceAny to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceAnyOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);
// Lowers ReduceMin to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceMinOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);
// Lowers ReduceMax to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceMaxOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);
// Lowers ReduceProd to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);
// Lowers ReduceSum to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);
// Lowers ReduceMean to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims);


@@ -50,7 +50,7 @@ Value getTosaConstTensorSingleF32(PatternRewriter &rewriter, Operation *op,
// Default template creates a constant tensor in T.
// To create INT48 TOSA constant, need to pass in llvm::APInt instead.
template <typename T>
llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
std::optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
ArrayRef<T> vec, ArrayRef<int64_t> shape);
LogicalResult tosaCastTensorToType(PatternRewriter &rewriter, Operation *op,


@@ -82,9 +82,8 @@ SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
// Convert a scalar value to the target type. The scalar value can be an element
// from a tensor or a scalar in the pytorch dialect. Both the scalar and dtype
// should be converted builtin types.
Value convertScalarToDtype(
OpBuilder &b, Location loc, Value scalar, Type dtype,
llvm::Optional<Type> srcOriginalDtype = std::nullopt);
Value convertScalarToDtype(OpBuilder &b, Location loc, Value scalar, Type dtype,
std::optional<Type> srcOriginalDtype = std::nullopt);
} // namespace Torch
} // namespace torch


@@ -174,11 +174,11 @@ m_TorchListOfConstantInts(SmallVectorImpl<int64_t> &bind_values) {
namespace detail {
/// Matches the optional constant integers stored in a `torch.ListConstruct`.
struct torch_list_of_optional_constant_ints_op_binder {
SmallVectorImpl<Optional<int64_t>> &bind_values;
SmallVectorImpl<std::optional<int64_t>> &bind_values;
/// Creates a matcher instance that binds the value to bvs if match succeeds.
torch_list_of_optional_constant_ints_op_binder(
SmallVectorImpl<Optional<int64_t>> &bvs)
SmallVectorImpl<std::optional<int64_t>> &bvs)
: bind_values(bvs) {}
bool match(Operation *op) {
@@ -203,7 +203,7 @@ struct torch_list_of_optional_constant_ints_op_binder {
/// `torch.prim.ListConstruct`.
inline detail::torch_list_of_optional_constant_ints_op_binder
m_TorchListOfOptionalConstantInts(
SmallVectorImpl<Optional<int64_t>> &bind_values) {
SmallVectorImpl<std::optional<int64_t>> &bind_values) {
return detail::torch_list_of_optional_constant_ints_op_binder(bind_values);
}


@@ -31,8 +31,8 @@ class ValueTensorType;
/// Common getter function signature that covers all tensor types.
/// Used for sharing code between NonValueTensorType and ValueTensorType.
using GetTensorTypeFn =
llvm::function_ref<Type(MLIRContext *, Optional<ArrayRef<int64_t>>, Type)>;
using GetTensorTypeFn = llvm::function_ref<Type(
MLIRContext *, std::optional<ArrayRef<int64_t>>, Type)>;
/// The representation of an unknown dimension size in an ArrayRef<int64_t>.
constexpr static int64_t kUnknownSize = -1;
@@ -45,7 +45,7 @@ public:
///
/// It is expected that for many users, `hasSizes`/`getSizes` will be a more
/// convenient API.
Optional<ArrayRef<int64_t>> getOptionalSizes() const;
std::optional<ArrayRef<int64_t>> getOptionalSizes() const;
/// Get the raw nullable Type representing the dtype of this tensor type.
///
@@ -90,7 +90,7 @@ public:
/// Return a type of the same kind as this one, but with given raw optional
/// sizes and raw optional dtype.
Type getWithSizesAndDtype(Optional<ArrayRef<int64_t>> optionalSizes,
Type getWithSizesAndDtype(std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalDtype) const;
/// Return a type with the same shape and dtype as this one, but with
@@ -127,7 +127,8 @@ namespace mlir {
namespace torch {
namespace Torch {
inline Optional<ArrayRef<int64_t>> BaseTensorType::getOptionalSizes() const {
inline std::optional<ArrayRef<int64_t>>
BaseTensorType::getOptionalSizes() const {
if (auto tensor = dyn_cast<NonValueTensorType>())
return tensor.getOptionalSizes();
if (auto tensor = dyn_cast<ValueTensorType>())


@@ -47,7 +47,7 @@ def Torch_NnModuleType : Torch_Type<"NnModule", "nn.Module"> {
// For standard ArrayRefs, which require allocation.
class OptionalArrayRefTorchParameter<string arrayOf, string desc = ""> :
AttrOrTypeParameter<
"::llvm::Optional<::llvm::ArrayRef<" # arrayOf # ">>", desc> {
"::std::optional<::llvm::ArrayRef<" # arrayOf # ">>", desc> {
let allocator = [{
if ($_self.has_value()) {
$_dst.value() = $_allocator.copyInto($_self.value());


@@ -23,7 +23,7 @@ bool getListConstructElements(Value v, SmallVectorImpl<Value> &elems);
/// Returns the index indicated by `v` for a list of given `length`.
/// If the index is negative, it is adjusted to `length` + `v`.
/// `None` is returned if the index is not an integer in the range [0, `length`).
llvm::Optional<int64_t> matchLegalConstantIndexIntoListOfSize(Value v,
std::optional<int64_t> matchLegalConstantIndexIntoListOfSize(Value v,
int64_t length);
torch_upstream::ScalarType getScalarTypeForType(Type type);
Type getTypeForScalarType(
@@ -62,7 +62,7 @@ bool isBuiltInType(Type type);
// Helper function to get the rank of `Base tensor type`.
// std::nullopt is returned if the tensorRank can't be determined.
Optional<unsigned> getTensorRank(Value tensor);
std::optional<unsigned> getTensorRank(Value tensor);
bool isViewLikeOp(Operation *op);


@@ -198,7 +198,7 @@ MlirType torchMlirTorchNonValueTensorTypeGet(MlirContext context,
intptr_t numSizes,
const int64_t *optionalSizes,
MlirType optionalDtype) {
Optional<ArrayRef<int64_t>> optionalSizesArrayRef = std::nullopt;
std::optional<ArrayRef<int64_t>> optionalSizesArrayRef = std::nullopt;
// if numSizes == -1, then it is unranked.
if (numSizes > -1)
optionalSizesArrayRef = llvm::makeArrayRef(optionalSizes, numSizes);
@@ -232,7 +232,7 @@ MlirType torchMlirTorchValueTensorTypeGet(MlirContext context,
intptr_t numSizes,
const int64_t *optionalSizes,
MlirType optionalDtype) {
Optional<ArrayRef<int64_t>> optionalSizesArrayRef = std::nullopt;
std::optional<ArrayRef<int64_t>> optionalSizesArrayRef = std::nullopt;
// if numSizes == -1, then it is unranked.
if (numSizes > -1)
optionalSizesArrayRef = llvm::makeArrayRef(optionalSizes, numSizes);


@@ -393,7 +393,7 @@ public:
// is violated for the cases of dynamic dimensions.
SmallVector<int64_t> outputShape(resultRank, kUnknownSize);
SmallVector<ReassociationIndices> unchangedDims;
llvm::Optional<int64_t> inferredDimension;
std::optional<int64_t> inferredDimension;
for (auto en : llvm::enumerate(outputSizeTorchInt)) {
int64_t inputDim;
int64_t size;
@@ -644,8 +644,8 @@ public:
makeShapeLLVMCompatible(inputShapeVec), resultType.getElementType());
Value castedInput =
rewriter.create<tensor::CastOp>(loc, adjustedInputType, input);
llvm::Optional<Value> expandedInput;
llvm::Optional<Value> collapsedInput;
std::optional<Value> expandedInput;
std::optional<Value> collapsedInput;
if (llvm::any_of(inputAssociations, [](ReassociationIndices indices) {
return indices.size() > 1;


@@ -49,7 +49,7 @@ Value getMhloConstTensorSingleF64(PatternRewriter &rewriter, Operation *op,
// T: storage C type.
// Default template creates a constant tensor in T.
template <typename T>
llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
std::optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
ArrayRef<T> vec, ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
for (int64_t a : shape) {
@@ -72,7 +72,7 @@ llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
// Template specialization for APInt
template <>
llvm::Optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
std::optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
Operation *op, ArrayRef<APInt> vec,
ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
@@ -95,7 +95,7 @@ llvm::Optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
// Template specialization for float
template <>
llvm::Optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
std::optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
Operation *op, ArrayRef<float> vec,
ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
@@ -117,9 +117,9 @@ llvm::Optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
}
template <>
llvm::Optional<Value>
getConstTensor<double>(PatternRewriter &rewriter, Operation *op,
ArrayRef<double> vec, ArrayRef<int64_t> shape) {
std::optional<Value> getConstTensor<double>(PatternRewriter &rewriter,
Operation *op, ArrayRef<double> vec,
ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
for (int64_t a : shape) {
num_total_elements *= a;
@@ -139,12 +139,12 @@ getConstTensor<double>(PatternRewriter &rewriter, Operation *op,
}
// Template instantiation
template llvm::Optional<Value> getConstTensor<int32_t>(PatternRewriter &,
template std::optional<Value> getConstTensor<int32_t>(PatternRewriter &,
Operation *,
ArrayRef<int32_t> vec,
ArrayRef<int64_t> shape);
template llvm::Optional<Value> getConstTensor<int64_t>(PatternRewriter &,
template std::optional<Value> getConstTensor<int64_t>(PatternRewriter &,
Operation *,
ArrayRef<int64_t> vec,
ArrayRef<int64_t> shape);


@@ -35,7 +35,7 @@ Value getMhloConstTensorSingleF64(PatternRewriter &rewriter, Operation *op,
// Default template creates a constant tensor in T.
// To create INT48 MHLO constant, need to pass in llvm::APInt instead.
template <typename T>
llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
std::optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
ArrayRef<T> vec, ArrayRef<int64_t> shape);
template <typename T>


@@ -71,7 +71,7 @@ static Value createInitialValueForReduceOp(Operation *op, Type elementTy,
}
// Util for converting AtenArgmaxOp and AtenMaxDimOp
static llvm::Optional<ValueRange>
static std::optional<ValueRange>
getMaxInDim(ConversionPatternRewriter &rewriter, Operation *op, Value &input,
ArrayRef<Value> inputShapeVec, int64_t dim,
size_t dimSizeIndexBits) {


@@ -109,9 +109,9 @@ Value getDynamicSliceInternal(PatternRewriter &rewriter, Operation *op,
// endIndex(default to dimSize), and step(default to 1) can be optional.
FailureOr<Value> getDynamicSlice(PatternRewriter &rewriter, Operation *op,
Type outTy, Value input,
llvm::Optional<Value> startIndexOpt,
llvm::Optional<Value> endIndexOpt,
llvm::Optional<Value> stepOpt, int64_t dim,
std::optional<Value> startIndexOpt,
std::optional<Value> endIndexOpt,
std::optional<Value> stepOpt, int64_t dim,
size_t dimSizeIndexBits) {
auto loc = op->getLoc();
auto inputTy = input.getType().dyn_cast<RankedTensorType>();
@@ -267,7 +267,7 @@ LogicalResult ConvertAtenOp<AtenSliceTensorOp>::matchAndRewrite(
return rewriter.notifyMatchFailure(
op, "only constant dim is currently supported");
auto getOptionalVal = [&](Value val) -> llvm::Optional<Value> {
auto getOptionalVal = [&](Value val) -> std::optional<Value> {
if (val.getType().isa<Torch::NoneType>()) {
return std::nullopt;
} else {
@@ -275,9 +275,9 @@ LogicalResult ConvertAtenOp<AtenSliceTensorOp>::matchAndRewrite(
}
};
llvm::Optional<Value> start = getOptionalVal(adaptor.getStart());
llvm::Optional<Value> end = getOptionalVal(adaptor.getEnd());
llvm::Optional<Value> step = getOptionalVal(adaptor.getStep());
std::optional<Value> start = getOptionalVal(adaptor.getStart());
std::optional<Value> end = getOptionalVal(adaptor.getEnd());
std::optional<Value> step = getOptionalVal(adaptor.getStep());
FailureOr<Value> sliceInfo =
getDynamicSlice(rewriter, op, outTy, self, start, end, step, dim,


@@ -279,7 +279,7 @@ public:
// to i32 as required for the scatter op.
// 2.) `values` is mapped to `updates` in scatter op.
// 3.) `input` is mapped to `original` in scatter op.
Optional<unsigned> indexTensorRank = getTensorRank(indexTensor);
std::optional<unsigned> indexTensorRank = getTensorRank(indexTensor);
if (!indexTensorRank || *indexTensorRank != 1)
return rewriter.notifyMatchFailure(
op, "unimplemented: index tensor with rank != 1 is not supported");


@@ -595,7 +595,7 @@ LogicalResult ConvertAtenOp<AtenReluOp>::matchAndRewrite(
return success();
}
using ReductionConvFunc = llvm::Optional<Value> (*)(PatternRewriter &,
using ReductionConvFunc = std::optional<Value> (*)(PatternRewriter &,
Operation *,
RankedTensorType, Value,
ElementsAttr, bool);
@@ -642,7 +642,7 @@ public:
keepDims)))
return failure();
llvm::Optional<Value> result =
std::optional<Value> result =
ConversionFuncT(rewriter, op, outputTy, self, reduceDimsAttr, keepDims);
if (!result)
@@ -1274,7 +1274,7 @@ public:
auto transposedLhsType = RankedTensorType::get(
makeShapeLLVMCompatible(transposedLhsShape), rhsElemTy);
llvm::Optional<Value> transposedLhsDimsConst =
std::optional<Value> transposedLhsDimsConst =
tosa::getConstTensor<int32_t>(
rewriter, op,
/*vec=*/transposedLhsDims,
@@ -1354,7 +1354,7 @@ public:
auto transposedRhsValue = rankBroadcastedRhs;
if (rhsNeedsTranspose) {
llvm::Optional<Value> transposedRhsDimsConst =
std::optional<Value> transposedRhsDimsConst =
tosa::getConstTensor<int32_t>(
rewriter, op,
/*vec=*/transposedRhsDims,
@@ -1510,7 +1510,7 @@ public:
if (opNeedsTranspose) {
llvm::Optional<Value> transposedOpShapeConst =
std::optional<Value> transposedOpShapeConst =
tosa::getConstTensor<int32_t>(
rewriter, op,
/*vec=*/transposedOpDims,
@@ -1699,7 +1699,7 @@ public:
std::swap(transposedRhsShape[rhsRank - 1], transposedRhsShape[rhsRank - 2]);
std::swap(transposedRhsDims[rhsRank - 1], transposedRhsDims[rhsRank - 2]);
llvm::Optional<Value> transposedRhsShapeConst =
std::optional<Value> transposedRhsShapeConst =
tosa::getConstTensor<int32_t>(
rewriter, op,
/*vec=*/transposedRhsDims,
@@ -1860,7 +1860,7 @@ LogicalResult ConvertAtenOp<AtenConvolutionOp>::matchAndRewrite(
"non-const dilation list unsupported");
// TOSA works in NHWC and takes OHWI weights. Perform the necessary transpose.
llvm::Optional<Value> nchwToNhwcTransposeConst =
std::optional<Value> nchwToNhwcTransposeConst =
tosa::getConstTensor<int32_t>(rewriter, op,
/*vec=*/{0, 2, 3, 1},
/*shape=*/{static_cast<int32_t>(4)});
@@ -1920,7 +1920,7 @@ LogicalResult ConvertAtenOp<AtenConvolutionOp>::matchAndRewrite(
rewriter.getI64ArrayAttr(dilation))
.getResult();
llvm::Optional<Value> nhwcToNchwTransposeConst =
std::optional<Value> nhwcToNchwTransposeConst =
tosa::getConstTensor<int32_t>(rewriter, op,
/*vec=*/{0, 3, 1, 2},
/*shape=*/{static_cast<int32_t>(4)});
@@ -3364,7 +3364,7 @@ public:
auto inputShape = makeShapeTorchCompatible(inputTy.getShape());
auto inputRank = inputTy.getRank();
llvm::Optional<Value> transposeDimsConst = tosa::getConstTensor<int32_t>(
std::optional<Value> transposeDimsConst = tosa::getConstTensor<int32_t>(
rewriter, op,
/*vec=*/transposeDims,
/*shape=*/{static_cast<int32_t>(inputRank)});


@@ -29,7 +29,7 @@ namespace tosa {
// Common function for lowering reduce operations to TOSA ops.
template <typename T>
llvm::Optional<Value> convertReduceOpCommon(
std::optional<Value> convertReduceOpCommon(
PatternRewriter &rewriter, Operation *op, RankedTensorType output_type,
Value input_value, ElementsAttr axes_elems, bool keep_dims,
Type reduce_element_type, bool is_quantized, double input_scale,
@@ -94,7 +94,7 @@ llvm::Optional<Value> convertReduceOpCommon(
}
// Lowers ReduceAll to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceAllOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {
@@ -109,7 +109,7 @@ convertReduceAllOp(PatternRewriter &rewriter, Operation *op,
}
// Lowers ReduceAny to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceAnyOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {
@@ -124,7 +124,7 @@ convertReduceAnyOp(PatternRewriter &rewriter, Operation *op,
}
// Lowers ReduceMin to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceMinOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {
@@ -139,7 +139,7 @@ convertReduceMinOp(PatternRewriter &rewriter, Operation *op,
}
// Lowers ReduceMax to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceMaxOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {
@@ -154,7 +154,7 @@ convertReduceMaxOp(PatternRewriter &rewriter, Operation *op,
}
// Lowers ReduceProd to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {
@@ -180,7 +180,7 @@ convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
}
// Lowers ReduceSum to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {
@@ -231,7 +231,7 @@ convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
}
// Lowers ReduceMean to a sequence of TOSA ops.
llvm::Optional<Value>
std::optional<Value>
convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType output_type, Value input_value,
ElementsAttr axes_elems, bool keep_dims) {


@@ -153,7 +153,7 @@ Value getTosaConstTensorSingleF32(PatternRewriter &rewriter, Operation *op,
// T: storage C type.
// Default template creates a constant tensor in T.
template <typename T>
llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
std::optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
ArrayRef<T> vec, ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
for (int64_t a : shape) {
@@ -176,7 +176,7 @@ llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
// Template specialization for APInt
template <>
llvm::Optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
std::optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
Operation *op, ArrayRef<APInt> vec,
ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
@@ -200,7 +200,7 @@ llvm::Optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
// Template specialization for float
template <>
llvm::Optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
std::optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
Operation *op, ArrayRef<float> vec,
ArrayRef<int64_t> shape) {
uint64_t num_total_elements = 1;
@@ -254,7 +254,7 @@ LogicalResult tosaCastTensorToType(PatternRewriter &rewriter, Operation *op,
for (int64_t a : srcShape)
num_total_elements *= a;
llvm::Optional<Value> constOp;
std::optional<Value> constOp;
if (srcElemTy.isInteger(64)) {
SmallVector<int64_t> values(num_total_elements, 0);
constOp =
@@ -283,12 +283,12 @@ LogicalResult tosaCastTensorToType(PatternRewriter &rewriter, Operation *op,
}
// Template instantiation
template llvm::Optional<Value> getConstTensor<int32_t>(PatternRewriter &,
template std::optional<Value> getConstTensor<int32_t>(PatternRewriter &,
Operation *,
ArrayRef<int32_t> vec,
ArrayRef<int64_t> shape);
template llvm::Optional<Value> getConstTensor<int64_t>(PatternRewriter &,
template std::optional<Value> getConstTensor<int64_t>(PatternRewriter &,
Operation *,
ArrayRef<int64_t> vec,
ArrayRef<int64_t> shape);


@@ -241,7 +241,7 @@ SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
// from a tensor or a scalar in the pytorch dialect. Both the scalar and dtype
// should be converted builtin types.
Value convertScalarToDtype(OpBuilder &b, Location loc, Value scalar, Type dtype,
llvm::Optional<Type> srcOriginalDtype) {
std::optional<Type> srcOriginalDtype) {
Type scalarType = scalar.getType();
if (scalarType == dtype)
return scalar;


@@ -136,7 +136,7 @@ static Value getScalarValue(Value input, Location loc,
}
Value scalar = nullptr;
if (auto valueTensorLiteralOp = input.getDefiningOp<ValueTensorLiteralOp>()) {
Optional<unsigned> tensorRank =
std::optional<unsigned> tensorRank =
getTensorRank(valueTensorLiteralOp.getResult());
if (valueTensorLiteralOp && tensorRank && *tensorRank == 0) {
auto tensorType =
@@ -296,13 +296,13 @@ LogicalResult ClassTypeOp::verify() {
//===----------------------------------------------------------------------===//
OperandRange
PrimLoopOp::getSuccessorEntryOperands(Optional<unsigned int> index) {
PrimLoopOp::getSuccessorEntryOperands(std::optional<unsigned int> index) {
assert(index.has_value() && index.value() == 0);
return getIterArgsInit();
}
void PrimLoopOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
(void)operands;
@@ -324,8 +324,8 @@ bool PrimLoopOp::isForLike() {
// PrimLoopConditionOp
//===----------------------------------------------------------------------===//
MutableOperandRange
PrimLoopConditionOp::getMutableSuccessorOperands(Optional<unsigned> index) {
MutableOperandRange PrimLoopConditionOp::getMutableSuccessorOperands(
std::optional<unsigned> index) {
// Pass all operands except the condition to the successor which is the
// parent loop op.
return getIterArgsMutable();
@@ -374,7 +374,7 @@ void PrimIfOp::print(OpAsmPrinter &p) {
p.printOptionalAttrDict((*this)->getAttrs());
}
void PrimIfOp::getSuccessorRegions(Optional<unsigned> index,
void PrimIfOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
@@ -1022,9 +1022,9 @@ void AtenDivTensorModeOp::getCanonicalizationPatterns(
// dimension size or returns failure if such a type was not found. If `dim` is
// `None`, then all dimension's sizes must be known.
static FailureOr<BaseTensorType>
traceKnownSizeTensorType(Value value, llvm::Optional<int64_t> dim) {
traceKnownSizeTensorType(Value value, std::optional<int64_t> dim) {
// Function to check if we found a type that contains the queried information.
auto foundType = [](BaseTensorType tensorType, llvm::Optional<int64_t>(dim)) {
auto foundType = [](BaseTensorType tensorType, std::optional<int64_t>(dim)) {
if (!tensorType.hasSizes())
return false;
@@ -1386,7 +1386,7 @@ void AtenSortIntOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
//===----------------------------------------------------------------------===//
LogicalResult NonValueTensorLiteralOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
auto attr = attributes.get("value").dyn_cast_or_null<ElementsAttr>();
@@ -1426,7 +1426,7 @@ bool NonValueTensorLiteralOp::isCompatibleReturnTypes(TypeRange inferred,
//===----------------------------------------------------------------------===//
LogicalResult ValueTensorLiteralOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
auto attr = attributes.get("value").dyn_cast_or_null<ElementsAttr>();
@@ -1500,7 +1500,7 @@ LogicalResult CopyToNonValueTensorOp::verify() {
}
LogicalResult CopyToNonValueTensorOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
auto resultType = operands[0].getType().cast<ValueTensorType>();
@@ -1527,7 +1527,7 @@ LogicalResult CopyToValueTensorOp::verify() {
}
LogicalResult CopyToValueTensorOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
auto resultType = operands[0].getType().cast<NonValueTensorType>();
@@ -1714,7 +1714,7 @@ void Aten__Getitem__TOp::getCanonicalizationPatterns(
return failure();
// Get the index, but be careful because it might be statically invalid.
llvm::Optional<int64_t> indexOpt = matchLegalConstantIndexIntoListOfSize(
std::optional<int64_t> indexOpt = matchLegalConstantIndexIntoListOfSize(
op.getOperand(1), listConstruct.getNumOperands());
if (!indexOpt)
return rewriter.notifyMatchFailure(op, "statically invalid index");
@@ -2326,7 +2326,7 @@ OpFoldResult PrimMinSelfIntOp::fold(ArrayRef<Attribute> operands) {
template <typename CalculateOp>
static void
getSuccessorRegionsForCalculateOp(CalculateOp op, Optional<unsigned> index,
getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (!index.has_value()) {
@@ -2345,7 +2345,7 @@ getSuccessorRegionsForCalculateOp(CalculateOp op, Optional<unsigned> index,
}
void ShapeCalculateOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
getSuccessorRegionsForCalculateOp(*this, index, operands, regions);
}
@@ -2355,7 +2355,7 @@ void ShapeCalculateOp::getSuccessorRegions(
//===----------------------------------------------------------------------===//
void DtypeCalculateOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
getSuccessorRegionsForCalculateOp(*this, index, operands, regions);
}
@@ -2365,7 +2365,7 @@ void DtypeCalculateOp::getSuccessorRegions(
//===----------------------------------------------------------------------===//
MutableOperandRange ShapeCalculateYieldShapesOp::getMutableSuccessorOperands(
Optional<unsigned> index) {
std::optional<unsigned> index) {
// The shape operands don't get forwarded to the body.
// MutableOperandRange always has an owning operation, even if empty, so
// create a 0-length range.
@@ -2384,7 +2384,7 @@ LogicalResult ShapeCalculateYieldShapesOp::verify() {
//===----------------------------------------------------------------------===//
MutableOperandRange DtypeCalculateYieldDtypesOp::getMutableSuccessorOperands(
Optional<unsigned> index) {
std::optional<unsigned> index) {
// The dtype operands don't get forwarded to the body.
// MutableOperandRange always has an owning operation, even if empty, so
// create a 0-length range.


@@ -86,7 +86,7 @@ bool Torch::isValidSubtype(Type subtype, Type type) {
//===----------------------------------------------------------------------===//
// Parse the `<T1, T2, T3>` of a type such as `!torch.tuple<T1, T2, T3>`.
static Optional<SmallVector<Type>>
static std::optional<SmallVector<Type>>
parseMultipleContainedTypes(AsmParser &parser) {
if (parser.parseLess())
return std::nullopt;
@@ -185,7 +185,7 @@ Type BaseTensorType::getWithSizesAndDtypeFrom(BaseTensorType other) const {
}
Type BaseTensorType::getWithSizesAndDtype(
Optional<ArrayRef<int64_t>> optionalSizes, Type optionalDtype) const {
std::optional<ArrayRef<int64_t>> optionalSizes, Type optionalDtype) const {
if (isa<NonValueTensorType>())
return NonValueTensorType::get(getContext(), optionalSizes, optionalDtype);
if (isa<ValueTensorType>())
@@ -203,7 +203,7 @@ ValueTensorType BaseTensorType::getWithValueSemantics() const {
static LogicalResult
verifyTensorType(function_ref<InFlightDiagnostic()> emitError,
Optional<ArrayRef<int64_t>> optionalSizes,
std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalDtype) {
if (optionalDtype && !isValidTorchDtype(optionalDtype)) {
emitError() << "invalid dtype " << optionalDtype
@@ -274,7 +274,7 @@ Type parseTensorType(MLIRContext *context, AsmParser &parser,
}
if (parser.parseGreater())
return Type();
Optional<ArrayRef<int64_t>> optionalSizes;
std::optional<ArrayRef<int64_t>> optionalSizes;
if (hasSizes)
optionalSizes.emplace(sizes);
@@ -286,7 +286,7 @@ }
}
static void printTensorType(AsmPrinter &printer,
Optional<ArrayRef<int64_t>> optionalSizes,
std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalDtype) {
if (!optionalSizes && !optionalDtype)
return;
@@ -331,7 +331,7 @@ NonValueTensorType::getWithLeastStaticInformation(MLIRContext *context) {
LogicalResult
NonValueTensorType::verify(function_ref<InFlightDiagnostic()> emitError,
Optional<ArrayRef<int64_t>> optionalSizes,
std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalDtype) {
return verifyTensorType(emitError, optionalSizes, optionalDtype);
}
@@ -340,7 +340,7 @@ Type NonValueTensorType::parse(AsmParser &parser) {
MLIRContext *context = parser.getContext();
return parseTensorType(
context, parser,
[](MLIRContext *context, Optional<ArrayRef<int64_t>> optionalSizes,
[](MLIRContext *context, std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalType) {
return NonValueTensorType::get(context, optionalSizes, optionalType);
});
@@ -392,7 +392,7 @@ TensorType ValueTensorType::toBuiltinTensor() const {
LogicalResult
ValueTensorType::verify(function_ref<InFlightDiagnostic()> emitError,
Optional<ArrayRef<int64_t>> optionalSizes,
std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalDtype) {
return verifyTensorType(emitError, optionalSizes, optionalDtype);
}
@@ -401,7 +401,7 @@ Type ValueTensorType::parse(AsmParser &parser) {
MLIRContext *context = parser.getContext();
return parseTensorType(
context, parser,
[](MLIRContext *context, Optional<ArrayRef<int64_t>> optionalSizes,
[](MLIRContext *context, std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalType) {
return ValueTensorType::get(context, optionalSizes, optionalType);
});


@@ -69,7 +69,7 @@ static Type computeReductionType(PatternRewriter &rewriter, Operation *op,
}
Type resultType = tensorType.getWithSizesAndDtype(
sizes.size() == 0 ? Optional<ArrayRef<int64_t>>()
sizes.size() == 0 ? std::optional<ArrayRef<int64_t>>()
: llvm::makeArrayRef(sizes),
tensorType.getDtype());
return resultType;
@@ -105,7 +105,7 @@ static Value createMaxAlongDimension(PatternRewriter &rewriter, Location loc,
BaseTensorType indexType =
valueType
.getWithSizesAndDtype(
!valueType.hasSizes() ? Optional<ArrayRef<int64_t>>()
!valueType.hasSizes() ? std::optional<ArrayRef<int64_t>>()
: llvm::makeArrayRef(valueType.getSizes()),
IntegerType::get(op->getContext(), 64, IntegerType::Signed))
.cast<BaseTensorType>();
@@ -228,7 +228,7 @@ public:
Location loc = op.getLoc();
Value self = op.getSelf();
MLIRContext *context = op.getContext();
Optional<unsigned> maybeRank = getTensorRank(self);
std::optional<unsigned> maybeRank = getTensorRank(self);
if (!maybeRank)
return rewriter.notifyMatchFailure(op, "Unimplemented: unranked tensor");
unsigned rank = *maybeRank;
@@ -548,7 +548,7 @@ public:
BaseTensorType inputType = input.getType().cast<BaseTensorType>();
BaseTensorType indicesTensorType = result.getType().cast<BaseTensorType>();
Optional<unsigned> maybeInputRank = getTensorRank(input);
std::optional<unsigned> maybeInputRank = getTensorRank(input);
if (!maybeInputRank) {
return rewriter.notifyMatchFailure(
op, "expected input tensor to have a rank");
@@ -681,8 +681,8 @@ public:
Value lhs = op.getSelf();
Value rhs = op.getOther();
Optional<unsigned> maybeLhsRank = getTensorRank(lhs);
Optional<unsigned> maybeRhsRank = getTensorRank(rhs);
std::optional<unsigned> maybeLhsRank = getTensorRank(lhs);
std::optional<unsigned> maybeRhsRank = getTensorRank(rhs);
if (!maybeLhsRank || !maybeRhsRank) {
return rewriter.notifyMatchFailure(
op, "expected input tensors to have a rank");
@@ -786,7 +786,7 @@ public:
LogicalResult matchAndRewrite(AtenTOp op,
PatternRewriter &rewriter) const override {
Value lhs = op.getSelf();
Optional<unsigned> lhsRank = getTensorRank(lhs);
std::optional<unsigned> lhsRank = getTensorRank(lhs);
auto loc = op.getLoc();
if (!lhsRank) {
@@ -861,7 +861,7 @@ public:
loc, listType, llvm::ArrayRef<Value>{slice0, slice1});
return rewriter.create<AtenCatOp>(loc, self.getType(), slices, dim);
};
Optional<unsigned> maybeRank = getTensorRank(self);
std::optional<unsigned> maybeRank = getTensorRank(self);
if (!maybeRank)
return rewriter.notifyMatchFailure(op, "Unimplemented: unranked tensor");
unsigned rank = *maybeRank;
@@ -917,7 +917,7 @@ public:
Location loc = op.getLoc();
Value self = op.getSelf();
MLIRContext *context = op.getContext();
Optional<unsigned> maybeRank = getTensorRank(self);
std::optional<unsigned> maybeRank = getTensorRank(self);
if (!maybeRank)
return rewriter.notifyMatchFailure(op, "Unimplemented: unranked tensor");
unsigned rank = *maybeRank;
@@ -1020,7 +1020,7 @@ public:
Location loc = op.getLoc();
Value self = op.getSelf();
MLIRContext *context = op.getContext();
Optional<unsigned> maybeRank = getTensorRank(self);
std::optional<unsigned> maybeRank = getTensorRank(self);
if (!maybeRank)
return rewriter.notifyMatchFailure(op, "unimplemented: unranked tensor");
unsigned rank = *maybeRank;
@@ -1258,7 +1258,7 @@ public:
Location loc = op.getLoc();
MLIRContext *context = op.getContext();
Value gradOutput = op.getGradOutput();
Optional<unsigned> maybeGradRank = getTensorRank(gradOutput);
std::optional<unsigned> maybeGradRank = getTensorRank(gradOutput);
if (!maybeGradRank) {
return rewriter.notifyMatchFailure(op,
"expected grad output to have a rank");
@@ -1410,8 +1410,8 @@ public:
Value input = op.getSelf();
Value mat1 = op.getMat1();
Value mat2 = op.getMat2();
Optional<unsigned> mat1Rank = getTensorRank(mat1);
Optional<unsigned> mat2Rank = getTensorRank(mat2);
std::optional<unsigned> mat1Rank = getTensorRank(mat1);
std::optional<unsigned> mat2Rank = getTensorRank(mat2);
// The operands `mat1`, `mat2` to aten.addmm must be of rank 2.
if (!mat1Rank || !mat2Rank || *mat1Rank != 2 || *mat2Rank != 2) {
@@ -1472,7 +1472,7 @@ public:
PatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Value input = op.getSelf();
Optional<unsigned> maybeInputRank = getTensorRank(input);
std::optional<unsigned> maybeInputRank = getTensorRank(input);
if (!maybeInputRank) {
return rewriter.notifyMatchFailure(op, "expected input to have a rank");
}
@@ -1602,7 +1602,7 @@ public:
PatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Value self = op.getSelf();
Optional<unsigned> maybeInputRank = getTensorRank(self);
std::optional<unsigned> maybeInputRank = getTensorRank(self);
if (!maybeInputRank) {
return rewriter.notifyMatchFailure(op, "expected input to have a rank");
}
@@ -2163,7 +2163,7 @@ class DecomposeAtenNativeBatchNormOp
// Rank of the input tensor must be greater than or equal to 2. The shape of
// the `input` is supposed to be (N, C, D?, H?, W?).
Optional<unsigned> maybeInputRank = getTensorRank(input);
std::optional<unsigned> maybeInputRank = getTensorRank(input);
if (!maybeInputRank || *maybeInputRank < 2)
return rewriter.notifyMatchFailure(
op, "input must have rank greater than or equal to 2");
@@ -2177,8 +2177,8 @@ class DecomposeAtenNativeBatchNormOp
op, "running stats must not be None in inference mode");
// Rank of `runningMean` and `runningVar` must be exactly 1.
Optional<unsigned> runningMeanRank = getTensorRank(runningMean);
Optional<unsigned> runningVarRank = getTensorRank(runningVar);
std::optional<unsigned> runningMeanRank = getTensorRank(runningMean);
std::optional<unsigned> runningVarRank = getTensorRank(runningVar);
if (!runningMeanRank || !runningVarRank || *runningMeanRank != 1 ||
*runningVarRank != 1)
return rewriter.notifyMatchFailure(
@@ -2229,7 +2229,7 @@ class DecomposeAtenNativeBatchNormOp
Value batchNormOutput = normalizedInput;
if (!weight.getType().isa<Torch::NoneType>()) {
// Rank of `weight` must be exactly 1.
Optional<unsigned> weightRank = getTensorRank(weight);
std::optional<unsigned> weightRank = getTensorRank(weight);
if (!weightRank || *weightRank != 1)
return rewriter.notifyMatchFailure(op, "expected weight to be rank 1");
weight = rewriter.create<AtenViewOp>(loc, reshapeType, weight,
@@ -2239,7 +2239,7 @@ class DecomposeAtenNativeBatchNormOp
}
if (!bias.getType().isa<Torch::NoneType>()) {
// Rank of `bias` must be exactly 1.
Optional<unsigned> biasRank = getTensorRank(bias);
std::optional<unsigned> biasRank = getTensorRank(bias);
if (!biasRank || *biasRank != 1)
return rewriter.notifyMatchFailure(op, "expected bias to be rank 1");
bias = rewriter.create<AtenViewOp>(loc, reshapeType, bias,
@@ -2659,7 +2659,7 @@ class DecomposeAtenAdaptiveAvgPool2dOp
MLIRContext *context = op.getContext();
Value input = op.getSelf();
Optional<unsigned> maybeRank = getTensorRank(input);
std::optional<unsigned> maybeRank = getTensorRank(input);
if (!maybeRank) {
return rewriter.notifyMatchFailure(op, "expected input to have a rank");
}
@@ -2832,7 +2832,7 @@ class DecomposeAtenNumpyTOp : public OpRewritePattern<AtenNumpyTOp> {
PatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Value self = op.getSelf();
Optional<unsigned> maybeInputRank = getTensorRank(self);
std::optional<unsigned> maybeInputRank = getTensorRank(self);
if (!maybeInputRank) {
return rewriter.notifyMatchFailure(op, "expected input to have a rank");
}
@@ -2879,7 +2879,7 @@ static LogicalResult calculateVariance(OpTy op, PatternRewriter &rewriter,
inputTensorTy = self.getType().cast<BaseTensorType>();
}
Optional<unsigned> maybeInputRank = getTensorRank(self);
std::optional<unsigned> maybeInputRank = getTensorRank(self);
if (!maybeInputRank) {
return rewriter.notifyMatchFailure(op, "expected input to have a rank");
}
@@ -3368,7 +3368,8 @@ private:
template <typename DecomposePattern>
void addPatternIfTargetOpIsIllegal(RewritePatternSet &patterns) {
MLIRContext *context = &getContext();
Optional<OperationName> opName = DecomposePattern(context).getRootKind();
std::optional<OperationName> opName =
DecomposePattern(context).getRootKind();
// Because the `DecomposeComplexOpsPass` uses a greedy algorithm
// to apply patterns, only patterns that we for sure know we want to run
// must be added. This restricts the set of patterns allowed in this file to


@@ -81,7 +81,7 @@ public:
assert(it != slotLinkageInfo.end());
return it->second;
}
Optional<LinkageInfo> getFuncLinkageInfo(NnModuleOp instance,
std::optional<LinkageInfo> getFuncLinkageInfo(NnModuleOp instance,
func::FuncOp methodFunc) {
auto it = funcLinkageInfo.find({instance, methodFunc});
if (it == funcLinkageInfo.end())
@@ -638,7 +638,7 @@ static LogicalResult globalizeObjectGraph(ModuleOp module) {
for (auto &monomorphization : tracker.getMonomorphizations()) {
auto newFunc = cast<func::FuncOp>(monomorphization.func->clone());
newFuncs[monomorphization] = newFunc;
Optional<LinkageInfo> linkageInfo = std::nullopt;
std::optional<LinkageInfo> linkageInfo = std::nullopt;
// If it is potentially a method, check its linkage info.
if (monomorphization.argInstances.size() != 0 &&
monomorphization.argInstances[0].argIndex == 0) {


@@ -367,8 +367,8 @@ static void markDecomposedOpsAsIllegal(MLIRContext *context,
target.addIllegalOp<AtenTOp>();
target.addIllegalOp<Aten_LogSoftmaxBackwardDataOp>();
target.addDynamicallyLegalOp<AtenMatmulOp>([](AtenMatmulOp op) {
Optional<unsigned> lhsRank = getTensorRank(op.getSelf());
Optional<unsigned> rhsRank = getTensorRank(op.getOther());
std::optional<unsigned> lhsRank = getTensorRank(op.getSelf());
std::optional<unsigned> rhsRank = getTensorRank(op.getOther());
if (!lhsRank || !rhsRank)
return false;
// Make aten.matmul legal if the following condition is satisfied.


@@ -40,7 +40,7 @@ public:
SmallVector<Operation *> copyLikeOps;
SmallVector<Operation *> viewLikeOps;
SmallVector<OverwriteTensorContentsOp> overwriteTensorContentsOps;
Optional<mlir::func::ReturnOp> returnOp;
std::optional<mlir::func::ReturnOp> returnOp;
};
// Check that graph rewriting is possible by doing an abstract


@@ -115,7 +115,7 @@ static torch_upstream::TypeKind getTypeKind(Type type) {
/// Returns `std::nullopt` if the types are contradictory. Note this can only
/// be used on the `dtype` from tensors and can't be used on other types like
/// scalar types.
static Optional<Type> meetElementTypes(Type lhs, Type rhs) {
static std::optional<Type> meetElementTypes(Type lhs, Type rhs) {
auto isNullOrBuiltIn = [](Type type) { return !type || isBuiltInType(type); };
(void)isNullOrBuiltIn;
assert(isNullOrBuiltIn(lhs) && "`lhs` must be a builtin type");
@@ -138,7 +138,7 @@ enum class OptionalKnowledge {
/// Returns the OptionalKnowledge that assumes information from both `lhs` and
/// `rhs`. Returns `std::nullopt` if the two pieces of knowledge are contradictory.
static Optional<OptionalKnowledge>
static std::optional<OptionalKnowledge>
meetOptionalKnowledge(OptionalKnowledge lhs, OptionalKnowledge rhs) {
if (lhs == OptionalKnowledge::unKnown)
return rhs;
@@ -328,20 +328,20 @@ struct ValueKnowledge {
// Given two pieces of static knowledge, calculate new knowledge that assumes
// the facts from both.
// If the two pieces of knowledge are contradictory, std::nullopt is returned.
static Optional<ValueKnowledge> meet(const ValueKnowledge &lhs,
static std::optional<ValueKnowledge> meet(const ValueKnowledge &lhs,
const ValueKnowledge &rhs) {
if (!lhs.isInitialized)
return lhs;
if (!rhs.isInitialized)
return rhs;
Optional<ValueKnowledge> knowledge = meetTypes(lhs, rhs);
std::optional<ValueKnowledge> knowledge = meetTypes(lhs, rhs);
if (!knowledge.has_value())
return std::nullopt;
ValueKnowledge result = knowledge.value();
Optional<OptionalKnowledge> optional =
std::optional<OptionalKnowledge> optional =
meetOptionalKnowledge(lhs.optional, rhs.optional);
if (!optional.has_value())
return std::nullopt;
@@ -349,7 +349,7 @@ struct ValueKnowledge {
return result;
}
static Optional<ValueKnowledge> meetTypes(const ValueKnowledge &lhs,
static std::optional<ValueKnowledge> meetTypes(const ValueKnowledge &lhs,
const ValueKnowledge &rhs) {
if (!lhs.isInitialized)
return lhs;
@@ -449,8 +449,8 @@ private:
void visitAtenArangeStartStepOp(AtenArangeStartStepOp op);
void visitAtenArangeStartOp(AtenArangeStartOp op);
void visitAtenArangeOp(AtenArangeOp op);
void visitAtenArangeLikeOpHelper(Operation *op, llvm::Optional<Value> start,
Value end, llvm::Optional<Value> step,
void visitAtenArangeLikeOpHelper(Operation *op, std::optional<Value> start,
Value end, std::optional<Value> step,
Value dtype);
void visitReductionAlongAllDimsOp(Operation *op, Type dtype,
ArrayRef<const ValueState *> operands);
@@ -464,7 +464,7 @@ private:
template <typename OpTy> void visitScalarToTensorConversionOp(OpTy op);
void visitAtenTensorOp(AtenTensorOp op);
template <typename OpTy>
void visitConstantTensorAllocOp(OpTy op, llvm::Optional<Type> dataType);
void visitConstantTensorAllocOp(OpTy op, std::optional<Type> dataType);
template <typename OpTy>
void visitConstantTensorAllocLikeOp(OpTy op,
ArrayRef<const ValueState *> operands);
@@ -518,7 +518,7 @@ updateResultTypeState(Type scalarType,
// unknown (None variant of the optional).
static torch_upstream::ResultTypeState
updateResultTypeState(const ValueKnowledge *tensor,
Optional<bool> rankIsNonZero,
std::optional<bool> rankIsNonZero,
const torch_upstream::ResultTypeState &inState,
bool skipRankCheck = false) {
if (!rankIsNonZero.has_value() && !skipRankCheck)
@@ -566,8 +566,9 @@ static Type getPromotedResultDType(ValueKnowledge *tensor, Type scalarType) {
return getTypeForScalarType(scalarType.getContext(), result_type(state));
}
static SmallVector<Optional<bool>> getRankIsNonZeroArray(ValueRange values) {
SmallVector<Optional<bool>> rankIsNonZero;
static SmallVector<std::optional<bool>>
getRankIsNonZeroArray(ValueRange values) {
SmallVector<std::optional<bool>> rankIsNonZero;
for (Value v : values) {
if (auto tensorType = v.getType().dyn_cast<BaseTensorType>()) {
if (tensorType.hasSizes()) {
@@ -588,13 +589,13 @@ static SmallVector<Optional<bool>> getRankIsNonZeroArray(ValueRange values) {
// Returns most generic type Type() if the tensor dtype is unknown.
static Type getPromotedResultType(MLIRContext *context,
ArrayRef<const ValueKnowledge *> tensors,
ArrayRef<Optional<bool>> rankIsNonZero,
ArrayRef<std::optional<bool>> rankIsNonZero,
bool skipRankCheck = false) {
torch_upstream::ResultTypeState state = {};
assert(tensors.size() == rankIsNonZero.size());
for (auto t : llvm::zip(tensors, rankIsNonZero)) {
const ValueKnowledge *tensor = std::get<0>(t);
Optional<bool> rankIsNonZero = std::get<1>(t);
std::optional<bool> rankIsNonZero = std::get<1>(t);
if (!tensor->dtype)
return Type();
state = updateResultTypeState(tensor, rankIsNonZero, state, skipRankCheck);
@@ -604,7 +605,7 @@ static Type getPromotedResultType(MLIRContext *context,
static Type getPromotedResultTypeAssumingNonZeroRank(
MLIRContext *context, ArrayRef<const ValueKnowledge *> tensors) {
SmallVector<Optional<bool>> rankIsNonZero(tensors.size(), true);
SmallVector<std::optional<bool>> rankIsNonZero(tensors.size(), true);
return getPromotedResultType(context, tensors, rankIsNonZero,
/*skipRankCheck=*/true);
}
@@ -1232,9 +1233,9 @@ void TypeAnalysis::visitAtenEmbeddingBagOp(Operation *op) {
// Arange like ops returns a 1-D tensor of size ceil(end - start).
void TypeAnalysis::visitAtenArangeLikeOpHelper(Operation *op,
llvm::Optional<Value> start,
std::optional<Value> start,
Value end,
llvm::Optional<Value> step,
std::optional<Value> step,
Value dtype) {
auto knowledge =
ValueKnowledge::getTensorPessimisticValueState(op->getContext());
@@ -1343,7 +1344,7 @@ void TypeAnalysis::visitAtenTensorOp(AtenTensorOp op) {
template <typename OpTy>
void TypeAnalysis::visitConstantTensorAllocOp(OpTy op,
llvm::Optional<Type> dataType) {
std::optional<Type> dataType) {
auto knowledge =
ValueKnowledge::getTensorPessimisticValueState(op->getContext());
if (!dataType)


@@ -83,7 +83,7 @@ public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(PromoteDtypesOp op,
PatternRewriter &rewriter) const override {
SmallVector<Optional<int64_t>> ranks;
SmallVector<std::optional<int64_t>> ranks;
SmallVector<int64_t> dtypes;
if (!matchPattern(op.getRanks(), m_TorchListOfOptionalConstantInts(ranks))) {
return rewriter.notifyMatchFailure(
@@ -107,13 +107,13 @@ public:
torch_upstream::ResultTypeState state{};
for (auto ranksAndDtypes : llvm::zip(ranks, dtypes)) {
Optional<int64_t> rank;
std::optional<int64_t> rank;
int64_t dtype;
std::tie(rank, dtype) = ranksAndDtypes;
auto scalarType = static_cast<torch_upstream::ScalarType>(dtype);
bool isScalarOnlyOp = llvm::all_of(
ranks, [](Optional<int64_t> rank) { return !rank.has_value(); });
ranks, [](std::optional<int64_t> rank) { return !rank.has_value(); });
if (!rank.has_value()) {
// If `rank` does not have a value, then we are dealing with a scalar


@@ -174,9 +174,8 @@ public:
if (!setItem.use_empty())
return rewriter.notifyMatchFailure(
op, "Expected `Aten_SetItemTOp` to not have users");
llvm::Optional<int64_t> indexOpt =
matchLegalConstantIndexIntoListOfSize(setItem.getIdx(),
runningList.size());
std::optional<int64_t> indexOpt = matchLegalConstantIndexIntoListOfSize(
setItem.getIdx(), runningList.size());
// The index might be statically out of bounds.
if (!indexOpt)
return rewriter.notifyMatchFailure(
@@ -311,7 +310,7 @@ static LogicalResult refineShapeCalculateResult(ShapeCalculateOp op,
if (auto setItem = dyn_cast<Aten_SetItemTOp>(user)) {
// If the index is statically known, we can clobber only a single index.
// Otherwise, we conservatively clobber all of them.
llvm::Optional<int64_t> indexOpt = matchLegalConstantIndexIntoListOfSize(
std::optional<int64_t> indexOpt = matchLegalConstantIndexIntoListOfSize(
setItem.getIdx(), listConstruct->getNumOperands());
if (indexOpt)
clobberedElements.set(*indexOpt);


@@ -23,7 +23,7 @@ bool Torch::isValidDim(int64_t dim, int64_t inputRank) {
return dim >= 0 && dim < inputRank;
}
llvm::Optional<int64_t>
std::optional<int64_t>
Torch::matchLegalConstantIndexIntoListOfSize(Value v, int64_t length) {
int64_t dim;
if (!matchPattern(v, m_TorchConstantInt(&dim)))
@@ -181,7 +181,7 @@ bool Torch::isBuiltInType(Type type) {
return isa<BuiltinDialect>(type.getDialect());
}
Optional<unsigned> Torch::getTensorRank(Value tensor) {
std::optional<unsigned> Torch::getTensorRank(Value tensor) {
BaseTensorType tensorType = tensor.getType().cast<BaseTensorType>();
if (!tensorType.hasSizes())
return std::nullopt;


@@ -45,7 +45,7 @@ LogicalResult ToBuiltinTensorOp::verify() {
}
LogicalResult ToBuiltinTensorOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
auto resultType =