From 8252656b6dadb9781eaf7d025380ca9d664f45e0 Mon Sep 17 00:00:00 2001
From: Stella Laurenzo
Date: Thu, 7 Dec 2023 23:13:42 -0800
Subject: [PATCH] Advance llvm-project and stablehlo. (#2619)

llvm-project: bbd2b08b95fe76bea138c1b03c1cd42ed3ee04df
stablehlo: ab709fe48de88c67717abfbd7ef17425eb95ddaf

These commits were chosen to account for an MLIR API break from
https://github.com/llvm/llvm-project/commit/3dbac2c007c114a720300d2a4d79abe9ca1351e7
which required a patch to stablehlo. We integrate a bit beyond that
commit to deal with some revert/reapply cycles in the intervening range
which were discovered in another downstream.

Further, this range requires adaptation to the stablehlo API breaks
introduced by https://github.com/openxla/stablehlo/pull/1872, which come
along for the ride. Since some stablehlo builders were changed to take
int64_t array refs directly, that change was also traced up some call
stacks to eliminate the signed/unsigned mismatches that resulted.

Also adds a few TOSA tests to the passing set that seem to work now.
---
 externals/llvm-project                        |  2 +-
 externals/stablehlo                           |  2 +-
 .../TorchToStablehlo/StablehloLegalizeUtils.h |  2 +-
 lib/Conversion/TorchToStablehlo/Basic.cpp     | 15 ++-----
 lib/Conversion/TorchToStablehlo/Linear.cpp    | 16 +++-----
 .../StablehloLegalizeUtils.cpp                | 16 ++++----
 projects/pt1/e2e_testing/xfail_sets.py        | 41 +++++++++++--------
 7 files changed, 43 insertions(+), 51 deletions(-)

diff --git a/externals/llvm-project b/externals/llvm-project
index 5e5a22caf..bbd2b08b9 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 5e5a22caf88ac1ccfa8dc5720295fdeba0ad9372
+Subproject commit bbd2b08b95fe76bea138c1b03c1cd42ed3ee04df
diff --git a/externals/stablehlo b/externals/stablehlo
index 83f095e72..ab709fe48 160000
--- a/externals/stablehlo
+++ b/externals/stablehlo
@@ -1 +1 @@
-Subproject commit 83f095e7217c897f1eccac5652600ceb944cb0e0
+Subproject commit ab709fe48de88c67717abfbd7ef17425eb95ddaf
diff --git a/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h b/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h
index e8d57b7f6..6e14b324b 100644
--- a/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h
+++ b/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h
@@ -51,7 +51,7 @@ Value promoteType(PatternRewriter &rewriter, Location loc, Value input,
 Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
                           TensorType outType);
 
-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
 
 // Get the dimension sizes of the input tensor, given the dimension axes
 FailureOr<SmallVector<Value, 4>> getDimSizesOfTensor(PatternRewriter &rewriter,
diff --git a/lib/Conversion/TorchToStablehlo/Basic.cpp b/lib/Conversion/TorchToStablehlo/Basic.cpp
index 737109977..f0dc4aaf2 100644
--- a/lib/Conversion/TorchToStablehlo/Basic.cpp
+++ b/lib/Conversion/TorchToStablehlo/Basic.cpp
@@ -615,12 +615,8 @@ public:
     SmallVector<int64_t> permValues(inputRank);
     std::iota(std::begin(permValues), std::end(permValues), 0);
     std::swap(permValues[dim0], permValues[dim1]);
-    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-        RankedTensorType::get({static_cast<int64_t>(permValues.size())},
-                              rewriter.getI64Type()),
-        permValues);
     rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                        permutation);
+                                                        permValues);
     return success();
   }
 };
@@ -793,12 +789,8 @@ LogicalResult ConvertAtenOp<AtenPermuteOp>::matchAndRewrite(
     return op.emitError("not all dims are valid");
   }
 
-  DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-      RankedTensorType::get({static_cast<int64_t>(permValues.size())},
-                            rewriter.getI64Type()),
-      permValues);
   rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                      permutation);
+                                                      permValues);
   return success();
 }
 
@@ -1755,8 +1747,7 @@ LogicalResult ConvertAtenOp<AtenFlipOp>::matchAndRewrite(
     }
   }
 
-  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(
-      op, outType, self, rewriter.getI64TensorAttr(dims));
+  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(op, outType, self, dims);
   return success();
 }
 
diff --git a/lib/Conversion/TorchToStablehlo/Linear.cpp b/lib/Conversion/TorchToStablehlo/Linear.cpp
index 71d679aea..df9231782 100644
--- a/lib/Conversion/TorchToStablehlo/Linear.cpp
+++ b/lib/Conversion/TorchToStablehlo/Linear.cpp
@@ -62,13 +62,9 @@ Value getPermutedTensor(PatternRewriter &rewriter, Operation *op, Value input,
     newShape.push_back(inpShape[d]);
   }
 
-  auto attrTy = RankedTensorType::get({static_cast<int64_t>(transDims.size())},
-                                      rewriter.getIntegerType(64));
-  auto permuteAttr = DenseIntElementsAttr::get(attrTy, transDims);
-
   auto outTy = RankedTensorType::get(newShape, inputTy.getElementType());
   auto result = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), outTy,
-                                                        input, permuteAttr);
+                                                        input, transDims);
   return result.getResult();
 }
 
@@ -500,8 +496,8 @@ public:
     for (int64_t i = 0; i <= rank; i++)
       transposeDims[i] = i;
     std::swap(transposeDims[rank - 1], transposeDims[rank - 2]);
-    weight = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), weight, rewriter.getI64TensorAttr(transposeDims));
+    weight = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), weight,
+                                                     transposeDims);
 
     // 3. [H, W, ..., G, OC, IC//G] => [H, W, ..., G*OC, IC//G]
     weightShapeInt.erase(weightShapeInt.end() - 2);
@@ -546,12 +542,10 @@ public:
     }
     auto transposeTy =
         RankedTensorType::get(transposeShape, weightTy.getElementType());
-    DenseIntElementsAttr permAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({nDims}, rewriter.getI64Type()), perm);
     auto transposeOp = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), transposeTy, weight, permAttr);
+        op->getLoc(), transposeTy, weight, perm);
     auto reverseOp = rewriter.create<stablehlo::ReverseOp>(
-        op->getLoc(), transposeOp, rewriter.getI64TensorAttr({0, 1}));
+        op->getLoc(), transposeOp, ArrayRef<int64_t>{0, 1});
 
     // Prepare for transposed convolution
     SmallVector<int64_t> stablehloStrideVec(nSpatialDims, 1);
diff --git a/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp b/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
index a25a66bbb..ed203cb0f 100644
--- a/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
+++ b/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
@@ -250,12 +250,12 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
   return bcast_op.getResult();
 }
 
-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
-  SmallVector<size_t> posDims;
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
+  SmallVector<int64_t> posDims;
   posDims.reserve(rank);
   std::transform(
       dims.begin(), dims.end(), std::back_inserter(posDims),
-      [rank](int64_t d) -> size_t { return toPositiveDim(d, rank); });
+      [rank](int64_t d) -> int64_t { return toPositiveDim(d, rank); });
   return posDims;
 }
 
@@ -316,10 +316,10 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
         op, "failed to get dimension sizes of the input");
 
   auto dimSizes = *dimSizesInfo;
-  auto rank = dimSizes.size();
-  size_t newRank = rank + inputUnsqzDims.size();
+  int64_t rank = dimSizes.size();
+  int64_t newRank = rank + inputUnsqzDims.size();
   auto unsqzDims = toPositiveDims(inputUnsqzDims, newRank);
-  for (size_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
+  for (int64_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
     if (k > 1 && unsqzDims[k] <= unsqzDims[k - 1])
       return rewriter.notifyMatchFailure(
           op, "unsqueeze dimensions must be specified in order");
@@ -335,8 +335,8 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
   std::vector<int64_t> newShape;
   newDimSizes.reserve(newRank);
   newShape.reserve(newRank);
-  for (size_t k = 0, i = 0, j = 0; k < newRank; ++k) {
-    if (j < unsqzDims.size() && unsqzDims[j] == k) {
+  for (int64_t k = 0, i = 0, j = 0; k < newRank; ++k) {
+    if (j < static_cast<int64_t>(unsqzDims.size()) && unsqzDims[j] == k) {
       newDimSizes.push_back(one);
       newShape.push_back(1);
       j++;
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index c4b0aab05..7b5e74e51 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -13,6 +13,8 @@ from torch_mlir_e2e_test.test_suite import COMMON_TORCH_MLIR_LOWERING_XFAILS
 
 from torch_mlir._version import torch_version_for_comparison, version
 
+print("TORCH_VERSION_FOR_COMPARISON =", torch_version_for_comparison())
+
 LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     # Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR failed
     # 'linalg.depthwise_conv_2d_nchw_chw' op inferred input/output operand #1 has shape's dimension #0 to be 4, but found 8
@@ -21,6 +23,14 @@ LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     "IscloseStaticModuleTrue_basic"
 }
 
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    LINALG_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
+    }
+
+
 TORCHDYNAMO_XFAIL_SET = {
     #### General TorchDynamo/PyTorch errors
@@ -306,10 +316,11 @@ TORCHDYNAMO_XFAIL_SET = {
     "ArangeStartOutViewModule_basic",
 }
 
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    TORCHDYNAMO_XFAIL_SET -= {
-        "ScaledDotProductAttentionSameModule_basic",
-        "ScaledDotProductAttentionDifferentModule_basic",
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    TORCHDYNAMO_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
     }
 
 TORCHDYNAMO_CRASHING_SET = {
@@ -1305,6 +1316,10 @@ TOSA_PASS_SET = {
     "MeanModule_basic",
     "ArangeStartOutModule_basic",
     "ArangeStartOutViewModule_basic",
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
 }
 
 MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
@@ -1335,21 +1350,13 @@ MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
     # failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
     "AtenEyeModuleInt2D_basic",
     "AtenEyeMModuleInt2D_basic",
+
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
 }
 
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    MAKE_FX_TOSA_PASS_SET -= {
-        # 'tensor.expand_shape' op expected rank expansion, but found source rank 1 >= result rank 1
-        "ReshapeCollapseModule_basic",
-
-        # failed to lower torch.aten.empty.memory_format
-        "BatchNorm1DModule_basic",
-        "BatchNorm1DWith2DInputModule_basic",
-        "BatchNorm2DModule_basic",
-        "BatchNorm3DModule_basic",
-        "BatchNorm1DStaticShapeModule_basic",
-    }
-
 LTC_CRASHING_SET = {
     # TODO: update test to move all inputs to the lazy device. Otherwise test fails with:
     # Check failed: lazy_tensor Input tensor is not a lazy tensor: CPUBoolType.
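
--
Migration note (illustrative, not part of the patch): every C++ hunk above
applies the same mechanical change. A minimal before/after sketch, using the
names from the Basic.cpp transpose hunks (op, outType, self, rewriter, and the
SmallVector<int64_t> permValues are whatever the surrounding conversion
already has in scope):

    // Before stablehlo #1872: the TransposeOp builder took the permutation
    // as a DenseIntElementsAttr, so callers first materialized a rank-1
    // i64 tensor attribute of the matching size.
    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
        RankedTensorType::get({static_cast<int64_t>(permValues.size())},
                              rewriter.getI64Type()),
        permValues);
    rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
                                                        permutation);

    // After: the builder accepts ArrayRef<int64_t> directly, so the index
    // vector is passed as-is. Keeping such vectors in int64_t end-to-end
    // (rather than size_t, as in toPositiveDims above) is what removes the
    // signed/unsigned mismatches mentioned in the commit message.
    rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
                                                        permValues);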