From 41bafe13ccdce88c16ef096bfa7486cf6624befa Mon Sep 17 00:00:00 2001 From: Ramiro Leal-Cavazos Date: Tue, 15 Aug 2023 16:53:28 +0000 Subject: [PATCH] [build] Update llvm tag to a3f2751f (#2397) This commit updates the `llvm-project` and `mlir-hlo` submodules to commits: llvm-project: a3f2751f782f3cdc6ba4790488ec20163a40ac37 mlir-hlo: 97c7e4b4506c3a2441c923e592833f45da439009 Changes made: - Rename `getSuccessorEntryOperands` to `getEntrySuccessorOperands` and remove `operands` from `getSuccessorRegions` (https://reviews.llvm.org/D157506) - Make `TypeConverter` `const` (https://reviews.llvm.org/D157601) --- externals/llvm-project | 2 +- externals/mlir-hlo | 2 +- include/torch-mlir/Conversion/Utils/Utils.h | 2 +- .../torch-mlir/Dialect/Torch/IR/TorchOps.td | 2 +- lib/Conversion/TorchToLinalg/DataMovement.cpp | 14 +++++------ lib/Conversion/TorchToLinalg/Pooling.cpp | 9 ++++--- .../TorchToLinalg/TensorConstructors.cpp | 6 ++--- .../TorchToLinalg/Uncategorized.cpp | 8 ++++--- lib/Conversion/TorchToSCF/TorchToSCF.cpp | 4 ++-- lib/Conversion/TorchToStablehlo/Basic.cpp | 2 +- .../TorchToStablehlo/GatherScatter.cpp | 2 +- .../TorchToTMTensor/TorchToTMTensor.cpp | 4 ++-- lib/Conversion/TorchToTosa/TorchToTosa.cpp | 8 +++---- lib/Conversion/Utils/Utils.cpp | 2 +- lib/Dialect/Torch/IR/TorchOps.cpp | 24 +++++++------------ 15 files changed, 43 insertions(+), 48 deletions(-) diff --git a/externals/llvm-project b/externals/llvm-project index f580901d5..a3f2751f7 160000 --- a/externals/llvm-project +++ b/externals/llvm-project @@ -1 +1 @@ -Subproject commit f580901d5d30e37755212f1c09e5b587587fbfeb +Subproject commit a3f2751f782f3cdc6ba4790488ec20163a40ac37 diff --git a/externals/mlir-hlo b/externals/mlir-hlo index 503736d15..97c7e4b45 160000 --- a/externals/mlir-hlo +++ b/externals/mlir-hlo @@ -1 +1 @@ -Subproject commit 503736d156c25022813c51cbdbe3b862d67a6916 +Subproject commit 97c7e4b4506c3a2441c923e592833f45da439009 diff --git 
a/include/torch-mlir/Conversion/Utils/Utils.h b/include/torch-mlir/Conversion/Utils/Utils.h index 485160b7e..8795974a3 100644 --- a/include/torch-mlir/Conversion/Utils/Utils.h +++ b/include/torch-mlir/Conversion/Utils/Utils.h @@ -76,7 +76,7 @@ SmallVector getAsConstantIndexValues(OpBuilder &b, Location loc, // convert their elements to valid target type. // TODO: remove this when list gets full support. SmallVector getTypeConvertedValues(OpBuilder &b, Location loc, - TypeConverter *converter, + const TypeConverter *converter, SmallVectorImpl &vs); mlir::RankedTensorType GetTypeFromTensorShape(llvm::ArrayRef shape, diff --git a/include/torch-mlir/Dialect/Torch/IR/TorchOps.td b/include/torch-mlir/Dialect/Torch/IR/TorchOps.td index 203cc0e70..e0a23df9a 100644 --- a/include/torch-mlir/Dialect/Torch/IR/TorchOps.td +++ b/include/torch-mlir/Dialect/Torch/IR/TorchOps.td @@ -507,7 +507,7 @@ def Torch_PrimCallMethodOp : Torch_Op<"prim.CallMethod", []> { } def Torch_PrimLoopOp : Torch_Op<"prim.Loop", [ - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods]> { let summary = "TorchScript prim::Loop op"; let description = [{ This op (together with prim.Loop.condition) define a looping construct diff --git a/lib/Conversion/TorchToLinalg/DataMovement.cpp b/lib/Conversion/TorchToLinalg/DataMovement.cpp index 5f5d87c06..7264b037c 100644 --- a/lib/Conversion/TorchToLinalg/DataMovement.cpp +++ b/lib/Conversion/TorchToLinalg/DataMovement.cpp @@ -328,7 +328,7 @@ public: SmallVector inputShape = makeShapeTorchCompatible(inputType.getShape()); int64_t inputRank = inputType.getRank(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); auto resultType = typeConverter->convertType(op.getType()).cast(); int64_t resultRank = resultType.getRank(); @@ -695,7 +695,7 @@ public: Value input = adaptor.getSelf(); auto inputType = input.getType().cast(); int64_t inputRank = inputType.getRank(); - TypeConverter *typeConverter = 
getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); auto resultType = typeConverter->convertType(op.getType()).cast(); int64_t resultRank = resultType.getRank(); @@ -804,7 +804,7 @@ public: op, "unimplemented: dim(th) dimension is not expected to be dynamic"); } - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); auto resultType = typeConverter->convertType(op.getType()).cast(); int64_t resultRank = resultType.getRank(); @@ -1046,7 +1046,7 @@ public: return failure(); Location loc = op.getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); auto input = adaptor.getSelf(); RankedTensorType resultType = @@ -1081,7 +1081,7 @@ public: if (failed(verifyLinalgCompatibleTypes(op, rewriter))) return failure(); Location loc = op.getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); // Collect all the tensors to be concatenated. 
auto tensorList = op.getTensors(); @@ -1312,7 +1312,7 @@ public: return failure(); Location loc = op.getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); auto input = adaptor.getSelf(); @@ -1361,7 +1361,7 @@ public: return failure(); Location loc = op.getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); MLIRContext *context = rewriter.getContext(); auto input = adaptor.getSelf(); diff --git a/lib/Conversion/TorchToLinalg/Pooling.cpp b/lib/Conversion/TorchToLinalg/Pooling.cpp index 4d0c1bd79..1d7ff925b 100644 --- a/lib/Conversion/TorchToLinalg/Pooling.cpp +++ b/lib/Conversion/TorchToLinalg/Pooling.cpp @@ -32,7 +32,7 @@ using namespace mlir::torch::Torch; template static LogicalResult checkAndGetPoolingParameters(OpTy op, ConversionPatternRewriter &rewriter, - TypeConverter *typeConverter, bool &ceilMode, + const TypeConverter *typeConverter, bool &ceilMode, SmallVectorImpl &kernelSizeIntValues, SmallVectorImpl &strideInts, SmallVectorImpl &paddingInts) { @@ -72,7 +72,6 @@ checkAndGetPoolingParameters(OpTy op, ConversionPatternRewriter &rewriter, return success(); } - // Creates a pooling operation based on the type specified by `OpTy` and // arguments passed. template @@ -153,7 +152,7 @@ public: if (failed(verifyLinalgCompatibleTypes(op, rewriter))) return failure(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); Value self = adaptor.getSelf(); int64_t selfRank = self.getType().cast().getRank(); // TODO: Add support for 3D inputs. 
@@ -225,7 +224,7 @@ public: if (failed(verifyLinalgCompatibleTypes(op, rewriter))) return failure(); Location loc = op->getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); Value self = adaptor.getSelf(); RankedTensorType selfType = self.getType().cast(); Type elementType = selfType.getElementType(); @@ -386,7 +385,7 @@ public: return failure(); Location loc = op->getLoc(); - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); Value self = adaptor.getSelf(); Type inputElementType = diff --git a/lib/Conversion/TorchToLinalg/TensorConstructors.cpp b/lib/Conversion/TorchToLinalg/TensorConstructors.cpp index 724430401..7e73fabd8 100644 --- a/lib/Conversion/TorchToLinalg/TensorConstructors.cpp +++ b/lib/Conversion/TorchToLinalg/TensorConstructors.cpp @@ -106,7 +106,7 @@ public: } Location loc = op.getLoc(); - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); SmallVector resultSizeTorchInt, resultSize, resultSizeIndex; if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) { return rewriter.notifyMatchFailure( @@ -211,7 +211,7 @@ public: } Location loc = op.getLoc(); - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); SmallVector resultSizeTorchInt, resultSize, resultSizeIndex; if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) { return rewriter.notifyMatchFailure( @@ -282,7 +282,7 @@ public: } Location loc = op.getLoc(); - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); RankedTensorType resultType = typeConverter->convertType(op->getResult(0).getType()) .cast(); diff --git a/lib/Conversion/TorchToLinalg/Uncategorized.cpp b/lib/Conversion/TorchToLinalg/Uncategorized.cpp index 0f65b7898..5291869ec 
100644 --- a/lib/Conversion/TorchToLinalg/Uncategorized.cpp +++ b/lib/Conversion/TorchToLinalg/Uncategorized.cpp @@ -127,8 +127,10 @@ static Value buildUnitNormalCdf(OpBuilder &b, Location &loc, Value x) { } template -static Value createCalculationForMathOpWithDtypeConversion( - OpBuilder &b, TypeConverter *converter, Value payloadArg, Operation *op) { +static Value +createCalculationForMathOpWithDtypeConversion(OpBuilder &b, + const TypeConverter *converter, + Value payloadArg, Operation *op) { Type dtype = converter->convertType(op->getResult(0).getType()) .template cast() .getElementType(); @@ -207,7 +209,7 @@ createTriangularMatrix(OpBuilder &b, Location loc, ValueRange payloadArgs, } static Value createLinalgPayloadCalculationForElementwiseOp( - OpBuilder &b, Location loc, TypeConverter *converter, + OpBuilder &b, Location loc, const TypeConverter *converter, ValueRange payloadArgs, Operation *op, ArrayRef operands) { if (isa(op)) return b.create(loc, payloadArgs[0]); diff --git a/lib/Conversion/TorchToSCF/TorchToSCF.cpp b/lib/Conversion/TorchToSCF/TorchToSCF.cpp index 7c256c071..146959151 100644 --- a/lib/Conversion/TorchToSCF/TorchToSCF.cpp +++ b/lib/Conversion/TorchToSCF/TorchToSCF.cpp @@ -77,7 +77,7 @@ public: if (op.isForLike()) return failure(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); SmallVector newResultTypes; if (failed( typeConverter->convertTypes(op.getResultTypes(), newResultTypes))) @@ -217,7 +217,7 @@ public: if (!op.isForLike()) return failure(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); SmallVector newResultTypes; if (failed( typeConverter->convertTypes(op.getResultTypes(), newResultTypes))) diff --git a/lib/Conversion/TorchToStablehlo/Basic.cpp b/lib/Conversion/TorchToStablehlo/Basic.cpp index 561f72016..ccaa866dd 100644 --- a/lib/Conversion/TorchToStablehlo/Basic.cpp +++ 
b/lib/Conversion/TorchToStablehlo/Basic.cpp @@ -1555,7 +1555,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite( } Location loc = op.getLoc(); - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); SmallVector resultSizeTorchInt, resultSize, resultSizeIndex; if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) { return rewriter.notifyMatchFailure( diff --git a/lib/Conversion/TorchToStablehlo/GatherScatter.cpp b/lib/Conversion/TorchToStablehlo/GatherScatter.cpp index 396f7ddce..ed8015e92 100644 --- a/lib/Conversion/TorchToStablehlo/GatherScatter.cpp +++ b/lib/Conversion/TorchToStablehlo/GatherScatter.cpp @@ -342,7 +342,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite( return failure(); Location loc = op.getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); auto input = adaptor.getSelf(); diff --git a/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp b/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp index d9e542532..a2d58daac 100644 --- a/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp +++ b/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp @@ -309,7 +309,7 @@ public: if (failed(verifyLinalgCompatibleTypes(op, rewriter))) return failure(); Location loc = op.getLoc(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); Value self = adaptor.getSelf(); Value index = adaptor.getIndex(); Value src = adaptor.getSrc(); @@ -361,7 +361,7 @@ public: return failure(); Location loc = op.getLoc(); MLIRContext *context = op->getContext(); - TypeConverter *typeConverter = getTypeConverter(); + const TypeConverter *typeConverter = getTypeConverter(); Value input = adaptor.getSelf(); Value torchTypeInput = op.getSelf(); Value minlength = adaptor.getMinlength(); diff --git a/lib/Conversion/TorchToTosa/TorchToTosa.cpp b/lib/Conversion/TorchToTosa/TorchToTosa.cpp index 
8633fd03f..f701df29c 100644 --- a/lib/Conversion/TorchToTosa/TorchToTosa.cpp +++ b/lib/Conversion/TorchToTosa/TorchToTosa.cpp @@ -2121,7 +2121,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite( // reshaped so it sits on the same dim as 'C'. auto reshapeToNormInputDim = [&](Operation *op, ConversionPatternRewriter &rewriter, - TypeConverter *converter, Type outType, + const TypeConverter *converter, Type outType, const Value toBcast, Value &result) { RankedTensorType toBcastType = toBcast.getType().dyn_cast(); @@ -3809,7 +3809,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite( AtenArangeStartStepOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const { - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); RankedTensorType resultType = typeConverter->convertType(op->getResult(0).getType()) .cast(); @@ -3859,7 +3859,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite( PrimNumToTensorScalarOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const { - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); RankedTensorType resultType = typeConverter->convertType(op->getResult(0).getType()) .cast(); @@ -4673,7 +4673,7 @@ template <> LogicalResult ConvertAtenOp::matchAndRewrite( AtenCatOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const { - TypeConverter *typeConverter = this->getTypeConverter(); + const TypeConverter *typeConverter = this->getTypeConverter(); auto outType = typeConverter->convertType(op.getType()).cast(); int64_t rank = outType.getRank(); diff --git a/lib/Conversion/Utils/Utils.cpp b/lib/Conversion/Utils/Utils.cpp index 1f6a889b5..80a95b7cf 100644 --- a/lib/Conversion/Utils/Utils.cpp +++ b/lib/Conversion/Utils/Utils.cpp @@ -230,7 +230,7 @@ SmallVector getAsConstantIndexValues(OpBuilder &b, Location loc, // convert their elements to valid target type. 
// TODO: remove this when list gets full support. SmallVector getTypeConvertedValues(OpBuilder &b, Location loc, - TypeConverter *converter, + const TypeConverter *converter, SmallVectorImpl &vs) { return llvm::to_vector<4>(llvm::map_range(vs, [&](Value v) { return converter->materializeTargetConversion( diff --git a/lib/Dialect/Torch/IR/TorchOps.cpp b/lib/Dialect/Torch/IR/TorchOps.cpp index 9403811ec..d271243f7 100644 --- a/lib/Dialect/Torch/IR/TorchOps.cpp +++ b/lib/Dialect/Torch/IR/TorchOps.cpp @@ -302,15 +302,13 @@ LogicalResult ClassTypeOp::verify() { //===----------------------------------------------------------------------===// OperandRange -PrimLoopOp::getSuccessorEntryOperands(std::optional index) { +PrimLoopOp::getEntrySuccessorOperands(std::optional index) { assert(index.has_value() && index.value() == 0); return getIterArgsInit(); } void PrimLoopOp::getSuccessorRegions( - std::optional index, ArrayRef operands, - SmallVectorImpl ®ions) { - (void)operands; + std::optional index, SmallVectorImpl ®ions) { if (!index.has_value()) { regions.emplace_back(&getRegion(), getRegion().getArguments().slice(1)); @@ -381,7 +379,6 @@ void PrimIfOp::print(OpAsmPrinter &p) { } void PrimIfOp::getSuccessorRegions(std::optional index, - ArrayRef operands, SmallVectorImpl ®ions) { // The `then` and the `else` region branch back to the parent operation. if (index.has_value()) { @@ -390,9 +387,9 @@ void PrimIfOp::getSuccessorRegions(std::optional index, } // If the condition is constant, we can give a more precise answer. - if (auto condAttr = operands.front().dyn_cast_or_null()) { - Region *executedRegion = - condAttr.getValue().isOne() ? &getThenRegion() : &getElseRegion(); + bool condition; + if (matchPattern(getCondition(), m_TorchConstantBool(&condition))) { + Region *executedRegion = condition ? 
&getThenRegion() : &getElseRegion(); regions.push_back(RegionSuccessor(executedRegion)); return; } @@ -2720,7 +2717,6 @@ OpFoldResult PrimMinIntOp::fold(FoldAdaptor adaptor) { template static void getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional index, - ArrayRef operands, SmallVectorImpl ®ions) { if (!index.has_value()) { // First thing the op does is branch into the calculation. @@ -2738,9 +2734,8 @@ getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional index, } void ShapeCalculateOp::getSuccessorRegions( - std::optional index, ArrayRef operands, - SmallVectorImpl ®ions) { - getSuccessorRegionsForCalculateOp(*this, index, operands, regions); + std::optional index, SmallVectorImpl ®ions) { + getSuccessorRegionsForCalculateOp(*this, index, regions); } //===----------------------------------------------------------------------===// @@ -2748,9 +2743,8 @@ void ShapeCalculateOp::getSuccessorRegions( //===----------------------------------------------------------------------===// void DtypeCalculateOp::getSuccessorRegions( - std::optional index, ArrayRef operands, - SmallVectorImpl ®ions) { - getSuccessorRegionsForCalculateOp(*this, index, operands, regions); + std::optional index, SmallVectorImpl ®ions) { + getSuccessorRegionsForCalculateOp(*this, index, regions); } //===----------------------------------------------------------------------===//