mirror of https://github.com/llvm/torch-mlir
Advance llvm-project and stablehlo. (#2619)
llvm-project: bbd2b08b95fe76bea138c1b03c1cd42ed3ee04df
stablehlo: ab709fe48de88c67717abfbd7ef17425eb95ddaf
These commits were chosen to account for an MLIR API break from
3dbac2c007
which required a patch to stablehlo. We integrate a bit beyond that
commit to deal with some revert/reapply cycles in the intervening range
that were discovered in another downstream project.
Further, this update requires adapting to the stablehlo API breaks
introduced in https://github.com/openxla/stablehlo/pull/1872, which
come along for the ride.
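
For illustration, a minimal before/after sketch of that builder change,
distilled from the transpose hunks below; assume a conversion-pattern
context where rewriter, op, outType, and self are already in scope:

    // Before openxla/stablehlo#1872: the permutation had to be wrapped
    // in a DenseIntElementsAttr over an i64 tensor type.
    SmallVector<int64_t> permValues = {1, 0};
    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
        RankedTensorType::get({static_cast<int64_t>(permValues.size())},
                              rewriter.getI64Type()),
        permValues);
    rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
                                                        permutation);

    // After: the generated builders accept the int64_t array ref
    // directly, so the attribute boilerplate disappears.
    rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
                                                        permValues);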
Since some stablehlo builders were changed to take int64_t array refs
directly, that change was also traced up some call stacks to eliminate
the signed/unsigned mismatches that resulted.
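
A compact example of that ripple, mirroring the toPositiveDims change in
this diff: once the result is handed straight to builders expecting
ArrayRef<int64_t>, a size_t-based helper would force a signed/unsigned
conversion at every use, so the helper now stays in int64_t end to end:

    // New signature: int64_t throughout, matching the ArrayRef<int64_t>
    // builder arguments downstream (toPositiveDim wraps negative dims).
    SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
      SmallVector<int64_t> posDims;
      posDims.reserve(rank);
      std::transform(
          dims.begin(), dims.end(), std::back_inserter(posDims),
          [rank](int64_t d) -> int64_t { return toPositiveDim(d, rank); });
      return posDims;
    }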
Also adds to the passing set a few TOSA tests that seem to work now.
pull/2623/head
snapshot-20231208.1046
parent 63505ad6b2
commit 8252656b6d
llvm-project submodule:
@@ -1 +1 @@
-Subproject commit 5e5a22caf88ac1ccfa8dc5720295fdeba0ad9372
+Subproject commit bbd2b08b95fe76bea138c1b03c1cd42ed3ee04df

stablehlo submodule:
@@ -1 +1 @@
-Subproject commit 83f095e7217c897f1eccac5652600ceb944cb0e0
+Subproject commit ab709fe48de88c67717abfbd7ef17425eb95ddaf
@@ -51,7 +51,7 @@ Value promoteType(PatternRewriter &rewriter, Location loc, Value input,
 Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
                           TensorType outType);
 
-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
 
 // Get the dimension sizes of the input tensor, given the dimension axes
 FailureOr<SmallVector<Value, 4>> getDimSizesOfTensor(PatternRewriter &rewriter,
@@ -615,12 +615,8 @@ public:
     SmallVector<int64_t> permValues(inputRank);
     std::iota(std::begin(permValues), std::end(permValues), 0);
     std::swap(permValues[dim0], permValues[dim1]);
-    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-        RankedTensorType::get({static_cast<long int>(permValues.size())},
-                              rewriter.getI64Type()),
-        permValues);
     rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                        permutation);
+                                                        permValues);
     return success();
   }
 };
@@ -793,12 +789,8 @@ LogicalResult ConvertAtenOp<AtenPermuteOp>::matchAndRewrite(
     return op.emitError("not all dims are valid");
   }
 
-  DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-      RankedTensorType::get({static_cast<long int>(permValues.size())},
-                            rewriter.getI64Type()),
-      permValues);
   rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                      permutation);
+                                                      permValues);
   return success();
 }
 
@@ -1755,8 +1747,7 @@ LogicalResult ConvertAtenOp<AtenFlipOp>::matchAndRewrite(
     }
   }
 
-  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(
-      op, outType, self, rewriter.getI64TensorAttr(dims));
+  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(op, outType, self, dims);
   return success();
 }
 
@@ -62,13 +62,9 @@ Value getPermutedTensor(PatternRewriter &rewriter, Operation *op, Value input,
     newShape.push_back(inpShape[d]);
   }
 
-  auto attrTy = RankedTensorType::get({static_cast<int64_t>(transDims.size())},
-                                      rewriter.getIntegerType(64));
-  auto permuteAttr = DenseIntElementsAttr::get(attrTy, transDims);
-
   auto outTy = RankedTensorType::get(newShape, inputTy.getElementType());
   auto result = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), outTy,
-                                                        input, permuteAttr);
+                                                        input, transDims);
   return result.getResult();
 }
 
@@ -500,8 +496,8 @@ public:
     for (int64_t i = 0; i <= rank; i++)
       transposeDims[i] = i;
     std::swap(transposeDims[rank - 1], transposeDims[rank - 2]);
-    weight = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), weight, rewriter.getI64TensorAttr(transposeDims));
+    weight = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), weight,
+                                                     transposeDims);
 
     // 3. [H, W, ..., G, OC, IC//G] => [H, W, ..., G*OC, IC//G]
     weightShapeInt.erase(weightShapeInt.end() - 2);
@@ -546,12 +542,10 @@ public:
     }
     auto transposeTy =
         RankedTensorType::get(transposeShape, weightTy.getElementType());
-    DenseIntElementsAttr permAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({nDims}, rewriter.getI64Type()), perm);
     auto transposeOp = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), transposeTy, weight, permAttr);
+        op->getLoc(), transposeTy, weight, perm);
     auto reverseOp = rewriter.create<stablehlo::ReverseOp>(
-        op->getLoc(), transposeOp, rewriter.getI64TensorAttr({0, 1}));
+        op->getLoc(), transposeOp, ArrayRef<int64_t>{0, 1});
 
     // Prepare for transposed convolution
     SmallVector<int64_t> stablehloStrideVec(nSpatialDims, 1);
@@ -250,12 +250,12 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
   return bcast_op.getResult();
 }
 
-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
-  SmallVector<size_t> posDims;
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
+  SmallVector<int64_t> posDims;
   posDims.reserve(rank);
   std::transform(
       dims.begin(), dims.end(), std::back_inserter(posDims),
-      [rank](int64_t d) -> size_t { return toPositiveDim(d, rank); });
+      [rank](int64_t d) -> int64_t { return toPositiveDim(d, rank); });
   return posDims;
 }
 
@@ -316,10 +316,10 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
         op, "failed to get dimension sizes of the input");
 
   auto dimSizes = *dimSizesInfo;
-  auto rank = dimSizes.size();
-  size_t newRank = rank + inputUnsqzDims.size();
+  int64_t rank = dimSizes.size();
+  int64_t newRank = rank + inputUnsqzDims.size();
   auto unsqzDims = toPositiveDims(inputUnsqzDims, newRank);
-  for (size_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
+  for (int64_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
     if (k > 1 && unsqzDims[k] <= unsqzDims[k - 1])
       return rewriter.notifyMatchFailure(
           op, "unsqueeze dimensions must be specified in order");
@@ -335,8 +335,8 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
   std::vector<int64_t> newShape;
   newDimSizes.reserve(newRank);
   newShape.reserve(newRank);
-  for (size_t k = 0, i = 0, j = 0; k < newRank; ++k) {
-    if (j < unsqzDims.size() && unsqzDims[j] == k) {
+  for (int64_t k = 0, i = 0, j = 0; k < newRank; ++k) {
+    if (j < static_cast<int64_t>(unsqzDims.size()) && unsqzDims[j] == k) {
       newDimSizes.push_back(one);
       newShape.push_back(1);
       j++;
@@ -13,6 +13,8 @@
 from torch_mlir_e2e_test.test_suite import COMMON_TORCH_MLIR_LOWERING_XFAILS
 from torch_mlir._version import torch_version_for_comparison, version
 
+print(f"TORCH_VERSION_FOR_COMPARISON =", torch_version_for_comparison())
+
 LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     # Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR failed
     # 'linalg.depthwise_conv_2d_nchw_chw' op inferred input/output operand #1 has shape's dimension #0 to be 4, but found 8
@@ -21,6 +23,14 @@ LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     "IscloseStaticModuleTrue_basic"
 }
 
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    LINALG_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
+    }
+
+
 TORCHDYNAMO_XFAIL_SET = {
     #### General TorchDynamo/PyTorch errors
 
@@ -306,10 +316,11 @@ TORCHDYNAMO_XFAIL_SET = {
     "ArangeStartOutViewModule_basic",
 }
 
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    TORCHDYNAMO_XFAIL_SET -= {
-        "ScaledDotProductAttentionSameModule_basic",
-        "ScaledDotProductAttentionDifferentModule_basic",
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    TORCHDYNAMO_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
     }
 
 TORCHDYNAMO_CRASHING_SET = {
@@ -1305,6 +1316,10 @@ TOSA_PASS_SET = {
     "MeanModule_basic",
     "ArangeStartOutModule_basic",
     "ArangeStartOutViewModule_basic",
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
 }
 
 MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
@@ -1335,21 +1350,13 @@ MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
     # failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
     "AtenEyeModuleInt2D_basic",
     "AtenEyeMModuleInt2D_basic",
+
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
 }
 
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    MAKE_FX_TOSA_PASS_SET -= {
-        # 'tensor.expand_shape' op expected rank expansion, but found source rank 1 >= result rank 1
-        "ReshapeCollapseModule_basic",
-
-        # failed to lower torch.aten.empty.memory_format
-        "BatchNorm1DModule_basic",
-        "BatchNorm1DWith2DInputModule_basic",
-        "BatchNorm2DModule_basic",
-        "BatchNorm3DModule_basic",
-        "BatchNorm1DStaticShapeModule_basic",
-    }
-
 LTC_CRASHING_SET = {
     # TODO: update test to move all inputs to the lazy device. Otherwise test fails with:
     # Check failed: lazy_tensor Input tensor is not a lazy tensor: CPUBoolType.