diff --git a/externals/llvm-project b/externals/llvm-project
index 458598ccc..bebc96956 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 458598ccc50c5118107f05d60f3d043772a91f26
+Subproject commit bebc96956b76bdbc36f1d82a788c810e5b12e2c5
diff --git a/externals/mlir-hlo b/externals/mlir-hlo
index cd9da150e..7b0ecf782 160000
--- a/externals/mlir-hlo
+++ b/externals/mlir-hlo
@@ -1 +1 @@
-Subproject commit cd9da150e729fd046109e7962e5f63f5fe067a3b
+Subproject commit 7b0ecf7827e3fc07d2af90e147bcedc165bc78ac
diff --git a/include/torch-mlir/Dialect/Torch/IR/TorchTypes.td b/include/torch-mlir/Dialect/Torch/IR/TorchTypes.td
index ae9dd4249..92d6186d6 100644
--- a/include/torch-mlir/Dialect/Torch/IR/TorchTypes.td
+++ b/include/torch-mlir/Dialect/Torch/IR/TorchTypes.td
@@ -45,7 +45,7 @@ def Torch_NnModuleType : Torch_Type<"NnModule", "nn.Module"> {
 }
 
 // For standard ArrayRefs, which require allocation.
-class OptionalArrayRefParameter<string arrayOf, string desc> :
+class OptionalArrayRefTorchParameter<string arrayOf, string desc> :
     AttrOrTypeParameter<
       "::llvm::Optional<::llvm::ArrayRef<" # arrayOf # ">>", desc> {
   let allocator = [{
@@ -146,7 +146,7 @@ class AnyTorchTensorType<string name, string typeMnemonic>
     - `getElementType()` -> `getDtype()` (but be sure that `hasDtype()` though).
   }];
   let parameters = (ins
-    OptionalArrayRefParameter<"int64_t", "sizes of dimensions">:$optionalSizes,
+    OptionalArrayRefTorchParameter<"int64_t", "sizes of dimensions">:$optionalSizes,
     "::mlir::Type":$optionalDtype
   );
   let genVerifyDecl = 1;
diff --git a/lib/Conversion/TorchToMhlo/CMakeLists.txt b/lib/Conversion/TorchToMhlo/CMakeLists.txt
index 819510724..94acb4f54 100644
--- a/lib/Conversion/TorchToMhlo/CMakeLists.txt
+++ b/lib/Conversion/TorchToMhlo/CMakeLists.txt
@@ -22,7 +22,6 @@ add_mlir_conversion_library(TorchMLIRTorchToMhlo
   Core
 
   LINK_LIBS PUBLIC
-  ChloOps
   MLIRIR
   MLIRPass
   MhloDialect
diff --git a/lib/Conversion/TorchToMhlo/Linear.cpp b/lib/Conversion/TorchToMhlo/Linear.cpp
index a8c3c1544..2f5c7df4d 100644
--- a/lib/Conversion/TorchToMhlo/Linear.cpp
+++ b/lib/Conversion/TorchToMhlo/Linear.cpp
@@ -102,13 +102,13 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
   }
   SmallVector<int64_t> outShape;
   // set batch dims, will skip invalid dimensions
-  for (size_t k = 0; k < lhsShape.size(); ++k) {
+  for (int64_t k = 0; k < static_cast<int64_t>(lhsShape.size()); ++k) {
     if (k == lhsResultDim || k == lhsContractingDim)
       continue;
     outShape.push_back(lhsShape[k]);
   }
-  for (size_t k = 0, b = 0; k < rhsShape.size(); ++k) {
-    if (b >= outShape.size())
+  for (int64_t k = 0, b = 0; k < static_cast<int64_t>(rhsShape.size()); ++k) {
+    if (b >= static_cast<int64_t>(outShape.size()))
       break;
     if (k == rhsResultDim || k == rhsContractingDim)
       continue;
@@ -119,10 +119,10 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
   }
 
   // set result dimensions
-  if (lhsResultDim < lhsShape.size() && lhsResultDim >= 0) {
+  if (lhsResultDim < static_cast<int64_t>(lhsShape.size()) && lhsResultDim >= 0) {
     outShape.push_back(lhsShape[lhsResultDim]);
   }
-  if (rhsResultDim < rhsShape.size() && rhsResultDim >= 0) {
+  if (rhsResultDim < static_cast<int64_t>(rhsShape.size()) && rhsResultDim >= 0) {
     outShape.push_back(rhsShape[rhsResultDim]);
   }
   return RankedTensorType::get(outShape, lhsTy.getElementType());
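Note on the Linear.cpp hunks: lhsResultDim and rhsResultDim are signed int64_t values
that may legitimately be negative (a sentinel for "no such dimension"), while size()
returns an unsigned size_t. In a mixed comparison the signed operand is converted to
unsigned, so -1 wraps to SIZE_MAX and the comparison no longer reads the way it is
written; it also trips -Wsign-compare. Casting the sizes to int64_t keeps every
comparison signed, which is the likely motivation for this hunk. Below is a minimal
standalone sketch of the difference; the variable names are illustrative and not
taken from the patch:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> shape = {2, 3, 4};
      int64_t resultDim = -1; // sentinel: "no result dimension"

      // Mixed signed/unsigned comparison: resultDim is converted to size_t,
      // -1 wraps to SIZE_MAX, and the test is false for the wrong reason.
      // Compilers flag this with -Wsign-compare.
      std::cout << (resultDim < shape.size()) << "\n"; // prints 0

      // Patched form: both operands are int64_t, so -1 < 3 holds as intended,
      // and the explicit >= 0 guard carries the real bounds check.
      bool inBounds = resultDim < static_cast<int64_t>(shape.size()) &&
                      resultDim >= 0;
      std::cout << inBounds << "\n"; // prints 0: negative dim rejected by >= 0
      return 0;
    }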