[cleanup] Fix a few more llvm::None -> std::nullopt

pull/1723/head
Sean Silva 2022-12-14 10:44:05 +00:00
parent 8c3774bb2a
commit b60da34f84
4 changed files with 10 additions and 9 deletions
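
For readers following along, this commit is part of the LLVM-wide migration away from llvm::Optional/llvm::None toward the standard std::optional/std::nullopt; the hunks below swap the spelling of the empty-optional sentinel. The snippet here is an illustrative sketch of that pattern, not code from the patch (parseRank is a made-up name):

#include <optional>

// Hypothetical helper showing the before/after: return std::nullopt
// (formerly spelled llvm::None) when no meaningful value can be produced.
std::optional<unsigned> parseRank(int raw) {
  if (raw < 0)
    return std::nullopt; // was: return llvm::None;
  return static_cast<unsigned>(raw);
}

The change is mechanical: the returned optional is disengaged either way, so callers are unaffected beyond the spelling.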

@@ -61,7 +61,7 @@ Value convertTensorToDtype(PatternRewriter &rewriter, Location loc, Value input,
 bool isBuiltInType(Type type);
 // Helper funtion to get rank of `Base tensor type`.
-// llvm::None is returned if the tensorRank can't be determined.
+// std::nullopt is returned if the tensorRank can't be determined.
 Optional<unsigned> getTensorRank(Value tensor);
 bool isViewLikeOp(Operation *op);
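
The updated comment above promises callers a disengaged optional when the rank is unknown. A self-contained sketch of what consuming that contract looks like (getRankOrUnknown is a hypothetical stand-in for getTensorRank, not part of this patch):

#include <optional>

// Stand-in for getTensorRank: std::nullopt signals "rank could not be determined".
std::optional<unsigned> getRankOrUnknown(bool ranked, unsigned rank) {
  if (!ranked)
    return std::nullopt;
  return rank;
}

int main() {
  // Callers test the optional before using the rank, exactly as they
  // did when the empty value was spelled llvm::None.
  if (auto rank = getRankOrUnknown(/*ranked=*/false, 0))
    return static_cast<int>(*rank);
  return 1; // rank unknown
}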

@@ -58,7 +58,7 @@ llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
   auto const_type =
@@ -82,7 +82,7 @@ llvm::Optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
   auto const_type = RankedTensorType::get(
       shape, rewriter.getIntegerType(vec[0].getBitWidth()));
@@ -105,7 +105,7 @@ llvm::Optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
   auto const_type = RankedTensorType::get(shape, rewriter.getF32Type());
@@ -127,7 +127,7 @@ getConstTensor<double>(PatternRewriter &rewriter, Operation *op,
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
   auto const_type = RankedTensorType::get(shape, rewriter.getF64Type());
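
The four getConstTensor specializations above all follow the same shape: validate the element count, emit a diagnostic on mismatch, and return an empty optional. A minimal stand-alone sketch of that shape, with a plain stderr message standing in for op->emitOpError (names are placeholders, not the patch's MLIR API):

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

// Placeholder for the getConstTensor-style check: report the mismatch and
// return std::nullopt (formerly llvm::None) instead of a constructed value.
std::optional<std::vector<float>> makeConstData(std::vector<float> vec,
                                                std::size_t numTotalElements) {
  if (vec.size() != numTotalElements) {
    std::fprintf(stderr, "makeConstData(): number of elements mismatch.\n");
    return std::nullopt;
  }
  return vec;
}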

@@ -77,16 +77,17 @@ getMaxInDim(ConversionPatternRewriter &rewriter, Operation *op, Value &input,
             size_t dimSizeIndexBits) {
   auto inputTy = input.getType().template cast<RankedTensorType>();
   if (!inputTy) {
-    return llvm::None;
+    return std::nullopt;
   }
   if (!inputTy.getElementType().isIntOrFloat()) {
-    return llvm::None;
+    return std::nullopt;
   }
   auto inputShape = inputTy.getShape();
   auto inputElemTy = inputTy.getElementType();
   Value initValue = createInitialValueForReduceOp(op, inputElemTy, rewriter);
-  if (!initValue) return llvm::None;
+  if (!initValue)
+    return std::nullopt;
   Value initIndex;
   if (dimSizeIndexBits == 32) {
     initIndex = mhlo::getConstTensor<int32_t>(rewriter, op, {0}, {}).value();

@@ -269,7 +269,7 @@ LogicalResult ConvertAtenOp<AtenSliceTensorOp>::matchAndRewrite(
   auto getOptionalVal = [&](Value val) -> llvm::Optional<Value> {
     if (val.getType().isa<Torch::NoneType>()) {
-      return llvm::None;
+      return std::nullopt;
     } else {
       return val;
     }
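
The getOptionalVal lambda above maps a Torch none value to an empty optional and passes everything else through. A hedged, self-contained analogue using a plain pointer in place of an MLIR Value (purely illustrative; the real code keys off Torch::NoneType):

#include <optional>

// A null pointer plays the role of a Torch::NoneType value and maps to
// std::nullopt (formerly llvm::None); any other input is passed through.
std::optional<int> getOptionalVal(const int *val) {
  if (val == nullptr) {
    return std::nullopt;
  } else {
    return *val;
  }
}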