diff --git a/include/torch-mlir/Dialect/Torch/Utils/Utils.h b/include/torch-mlir/Dialect/Torch/Utils/Utils.h index e7bb768e1..55e840301 100644 --- a/include/torch-mlir/Dialect/Torch/Utils/Utils.h +++ b/include/torch-mlir/Dialect/Torch/Utils/Utils.h @@ -61,7 +61,7 @@ Value convertTensorToDtype(PatternRewriter &rewriter, Location loc, Value input, bool isBuiltInType(Type type); // Helper funtion to get rank of `Base tensor type`. -// llvm::None is returned if the tensorRank can't be determined. +// std::nullopt is returned if the tensorRank can't be determined. Optional getTensorRank(Value tensor); bool isViewLikeOp(Operation *op); diff --git a/lib/Conversion/TorchToMhlo/MhloLegalizeUtils.cpp b/lib/Conversion/TorchToMhlo/MhloLegalizeUtils.cpp index f7addbe41..04ba232ae 100644 --- a/lib/Conversion/TorchToMhlo/MhloLegalizeUtils.cpp +++ b/lib/Conversion/TorchToMhlo/MhloLegalizeUtils.cpp @@ -58,7 +58,7 @@ llvm::Optional getConstTensor(PatternRewriter &rewriter, Operation *op, if (vec.size() != num_total_elements) { op->emitOpError("getConstTensor(): number of elements mismatch."); - return llvm::None; + return std::nullopt; } auto const_type = @@ -82,7 +82,7 @@ llvm::Optional getConstTensor(PatternRewriter &rewriter, if (vec.size() != num_total_elements) { op->emitOpError("getConstTensor(): number of elements mismatch."); - return llvm::None; + return std::nullopt; } auto const_type = RankedTensorType::get( shape, rewriter.getIntegerType(vec[0].getBitWidth())); @@ -105,7 +105,7 @@ llvm::Optional getConstTensor(PatternRewriter &rewriter, if (vec.size() != num_total_elements) { op->emitOpError("getConstTensor(): number of elements mismatch."); - return llvm::None; + return std::nullopt; } auto const_type = RankedTensorType::get(shape, rewriter.getF32Type()); @@ -127,7 +127,7 @@ getConstTensor(PatternRewriter &rewriter, Operation *op, if (vec.size() != num_total_elements) { op->emitOpError("getConstTensor(): number of elements mismatch."); - return llvm::None; + return std::nullopt; } auto const_type = RankedTensorType::get(shape, rewriter.getF64Type()); diff --git a/lib/Conversion/TorchToMhlo/Reduction.cpp b/lib/Conversion/TorchToMhlo/Reduction.cpp index 68a0dbb88..4fdb614f5 100644 --- a/lib/Conversion/TorchToMhlo/Reduction.cpp +++ b/lib/Conversion/TorchToMhlo/Reduction.cpp @@ -77,16 +77,17 @@ getMaxInDim(ConversionPatternRewriter &rewriter, Operation *op, Value &input, size_t dimSizeIndexBits) { auto inputTy = input.getType().template cast(); if (!inputTy) { - return llvm::None; + return std::nullopt; } if (!inputTy.getElementType().isIntOrFloat()) { - return llvm::None; + return std::nullopt; } auto inputShape = inputTy.getShape(); auto inputElemTy = inputTy.getElementType(); Value initValue = createInitialValueForReduceOp(op, inputElemTy, rewriter); - if (!initValue) return llvm::None; + if (!initValue) + return std::nullopt; Value initIndex; if (dimSizeIndexBits == 32) { initIndex = mhlo::getConstTensor(rewriter, op, {0}, {}).value(); diff --git a/lib/Conversion/TorchToMhlo/ViewLike.cpp b/lib/Conversion/TorchToMhlo/ViewLike.cpp index 9d294ebdf..8934d0ff2 100644 --- a/lib/Conversion/TorchToMhlo/ViewLike.cpp +++ b/lib/Conversion/TorchToMhlo/ViewLike.cpp @@ -269,7 +269,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite( auto getOptionalVal = [&](Value val) -> llvm::Optional { if (val.getType().isa()) { - return llvm::None; + return std::nullopt; } else { return val; }