//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "torch-mlir/Conversion/TorchToStablehlo/TorchToStablehlo.h"

#include "../PassDetail.h"
#include "PopulatePatterns.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h"
#include "torch-mlir/Conversion/Utils/Utils.h"
#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
#include "torch-mlir/Dialect/TorchConversion/IR/TorchConversionOps.h"

using namespace mlir;
using namespace mlir::torch;
using namespace mlir::torch::Torch;
using namespace mlir::torch::torch_to_stablehlo;
namespace {
|
|
|
|
Value getBroadcastTensor(PatternRewriter &rewriter, Operation *op, Value tensor,
|
|
|
|
ArrayRef<int64_t> shape, ArrayRef<Value> dimSizes,
|
|
|
|
ArrayRef<int64_t> broadcastDims) {
|
|
|
|
auto tensorTy = tensor.getType().dyn_cast<RankedTensorType>();
|
|
|
|
auto loc = op->getLoc();
|
2023-02-02 21:29:47 +08:00
|
|
|
Value stablehloShape = rewriter.create<tensor::FromElementsOp>(loc, dimSizes);
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
RankedTensorType outTy =
|
|
|
|
RankedTensorType::get(shape, tensorTy.getElementType());
|
|
|
|
|
Bump stablehlo to openxla/stablehlo@fd52182f76cadb82f2064fe5fc49a4fb4347a826 (#2821)
With the recent LLVM integrate and changes from
https://github.com/llvm/llvm-project/pull/78260, we hit this build error
in Stablehlo (which is quite old).
```
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1020:14: error: no member named 'startRootUpdate' in 'mlir::PatternRewriter'
rewriter.startRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1026:16: error: no member named 'finalizeRootUpdate' in 'mlir::PatternRewriter'
rewriter.finalizeRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1029:16: error: no member named 'cancelRootUpdate' in 'mlir::PatternRewriter'
rewriter.cancelRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1108:14: error: no member named 'updateRootInPlace' in 'mlir::PatternRewriter'
rewriter.updateRootInPlace(op->getParentOp(), [&]() { return; });
~~~~~~~~ ^
4 errors generated.
Target @torch-mlir//:torch-mlir-opt failed to build
```
I'm still puzzled as to how this didn't fail with the CMake merge gating
CI (do we not test Stablehlo builds/tests?). In any case, bumping our
submodule to https://github.com/openxla/stablehlo/pull/1918 fixes it.
It exposes a new failing lit test in TorchToStablehlo though, that I
have looped stablehlo developers into
([here](https://discord.com/channels/999073994483433573/999074539138990131/1201235845391331419)).
```
bazel run @torch-mlir//test/Conversion:TorchToStablehlo/scatter.mlir.test
...external/torch-mlir/test/Conversion/TorchToStablehlo/scatter.mlir
within split at <stdin>:1 offset :33:8: error: unexpected error: Expects non-empty reduction block for type inference
%0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64>
^
LLVM ERROR: Failed to infer result type(s).
```
Bazel CI:
https://github.com/sjain-stanford/torch-mlir/actions/runs/7732673480/job/21083102228
2024-02-01 06:21:17 +08:00
|
|
|
auto broadcastAttr = rewriter.getDenseI64ArrayAttr(broadcastDims);
|
2022-08-04 10:10:54 +08:00
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
auto broadcast = rewriter.create<stablehlo::DynamicBroadcastInDimOp>(
|
|
|
|
loc, outTy, tensor, stablehloShape, broadcastAttr);
|
2022-08-04 10:10:54 +08:00
|
|
|
return broadcast;
|
|
|
|
}
|
|
|
|
|
|
|
|
Value getPermutedTensor(PatternRewriter &rewriter, Operation *op, Value input,
|
|
|
|
ArrayRef<int64_t> inpTransDims) {
|
|
|
|
auto inputTy = input.getType().dyn_cast<RankedTensorType>();
|
|
|
|
auto rank = inputTy.getRank();
|
2023-02-02 21:29:47 +08:00
|
|
|
auto transDims = hlo::toPositiveDims(inpTransDims, rank);
|
2022-08-04 10:10:54 +08:00
|
|
|
auto inpShape = inputTy.getShape();
|
|
|
|
std::vector<int64_t> newShape;
|
|
|
|
newShape.reserve(rank);
|
|
|
|
|
|
|
|
for (auto d : transDims) {
|
|
|
|
newShape.push_back(inpShape[d]);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto outTy = RankedTensorType::get(newShape, inputTy.getElementType());
|
2023-02-02 21:29:47 +08:00
|
|
|
auto result = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), outTy,
|
2023-12-08 15:13:42 +08:00
|
|
|
input, transDims);
|
2022-08-04 10:10:54 +08:00
|
|
|
return result.getResult();
|
|
|
|
}
|
|
|
|
|
2022-09-23 20:50:29 +08:00
|
|
|
RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
|
|
|
|
Value &lhs, Value &rhs,
|
|
|
|
int64_t lhsResultDim, int64_t rhsResultDim,
|
|
|
|
int64_t lhsContractingDim,
|
|
|
|
int64_t rhsContractingDim) {
|
|
|
|
auto lhsTy = lhs.getType().dyn_cast<RankedTensorType>();
|
|
|
|
auto rhsTy = rhs.getType().dyn_cast<RankedTensorType>();
|
|
|
|
|
|
|
|
auto oldLhsShape = lhsTy.getShape();
|
|
|
|
auto oldRhsShape = rhsTy.getShape();
|
|
|
|
SmallVector<int64_t> lhsShape;
|
|
|
|
SmallVector<int64_t> rhsShape;
|
|
|
|
lhsShape.append(oldLhsShape.begin(), oldLhsShape.end());
|
|
|
|
rhsShape.append(oldRhsShape.begin(), oldRhsShape.end());
|
|
|
|
auto lhsContractingDimSize = lhsShape[lhsContractingDim];
|
|
|
|
auto rhsContractingDimSize = rhsShape[rhsContractingDim];
|
|
|
|
if (lhsContractingDimSize != rhsContractingDimSize) {
|
2022-12-02 12:38:28 +08:00
|
|
|
if (lhsContractingDimSize == ShapedType::kDynamic &&
|
2022-09-23 20:50:29 +08:00
|
|
|
rhsContractingDimSize >= 0) {
|
|
|
|
lhsShape[lhsContractingDim] = rhsContractingDimSize;
|
|
|
|
auto newRankTy = RankedTensorType::get(lhsShape, lhsTy.getElementType());
|
|
|
|
lhs = rewriter.create<tensor::CastOp>(op->getLoc(), newRankTy, lhs);
|
2022-12-02 12:38:28 +08:00
|
|
|
} else if (rhsContractingDimSize == ShapedType::kDynamic &&
|
2022-09-23 20:50:29 +08:00
|
|
|
lhsContractingDimSize >= 0) {
|
|
|
|
rhsShape[rhsContractingDim] = lhsContractingDimSize;
|
|
|
|
auto newRankTy = RankedTensorType::get(rhsShape, rhsTy.getElementType());
|
|
|
|
rhs = rewriter.create<tensor::CastOp>(op->getLoc(), newRankTy, rhs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
SmallVector<int64_t> outShape;
|
|
|
|
// set batch dims, will skip invalid dimensions
|
2022-09-27 00:44:54 +08:00
|
|
|
for (int64_t k = 0; k < static_cast<int64_t>(lhsShape.size()); ++k) {
|
2022-09-23 20:50:29 +08:00
|
|
|
if (k == lhsResultDim || k == lhsContractingDim)
|
|
|
|
continue;
|
|
|
|
outShape.push_back(lhsShape[k]);
|
|
|
|
}
|
2022-09-27 00:44:54 +08:00
|
|
|
for (int64_t k = 0, b = 0; k < static_cast<int64_t>(rhsShape.size()); ++k) {
|
|
|
|
if (b >= static_cast<int64_t>(outShape.size()))
|
2022-09-23 20:50:29 +08:00
|
|
|
break;
|
|
|
|
if (k == rhsResultDim || k == rhsContractingDim)
|
|
|
|
continue;
|
2022-12-02 12:38:28 +08:00
|
|
|
if (outShape[b] == ShapedType::kDynamic && rhsShape[k] >= 0) {
|
2022-09-23 20:50:29 +08:00
|
|
|
outShape[b] = rhsShape[k];
|
|
|
|
}
|
|
|
|
b++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// set result dimensions
|
2023-02-02 21:29:47 +08:00
|
|
|
if (lhsResultDim < static_cast<int64_t>(lhsShape.size()) &&
|
|
|
|
lhsResultDim >= 0) {
|
2022-09-23 20:50:29 +08:00
|
|
|
outShape.push_back(lhsShape[lhsResultDim]);
|
|
|
|
}
|
2023-02-02 21:29:47 +08:00
|
|
|
if (rhsResultDim < static_cast<int64_t>(rhsShape.size()) &&
|
|
|
|
rhsResultDim >= 0) {
|
2022-09-23 20:50:29 +08:00
|
|
|
outShape.push_back(rhsShape[rhsResultDim]);
|
|
|
|
}
|
|
|
|
return RankedTensorType::get(outShape, lhsTy.getElementType());
|
|
|
|
}
|
|
|
|
|
2022-08-04 10:10:54 +08:00
|
|
|
void getBmmBroadcast(PatternRewriter &rewriter, Operation *op, Value &inpLhs,
|
2022-09-01 10:36:02 +08:00
|
|
|
Value &inpRhs, int64_t leadingRank,
|
|
|
|
size_t dimSizeIndexBits) {
|
2022-08-04 10:10:54 +08:00
|
|
|
Value lhs = inpLhs;
|
|
|
|
Value rhs = inpRhs;
|
|
|
|
auto lhsRankTy = inpLhs.getType().dyn_cast<RankedTensorType>();
|
|
|
|
auto rhsRankTy = inpRhs.getType().dyn_cast<RankedTensorType>();
|
|
|
|
|
|
|
|
auto lhsRank = lhsRankTy.getRank();
|
|
|
|
auto rhsRank = rhsRankTy.getRank();
|
[Stablehlo] Enhance broadcast pattern in matmul Ops (#3161)
To pass test "MatmulStaticBroadcast_basic" in stablehlo:
```python
class MatmulStaticBroadcast(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([4, 1, 6, 7], torch.float32, True),
([8, 1, 5, 7, 6], torch.float32, True),
])
def forward(self, lhs, rhs):
return torch.matmul(lhs, rhs)
@register_test_case(module_factory=lambda: MatmulStaticBroadcast())
def MatmulStaticBroadcast_basic(module, tu: TestUtils):
module.forward(tu.rand(4, 1, 6, 7), tu.rand(8, 1, 5, 7, 6))
```
2024-04-16 10:10:36 +08:00
|
|
|
int64_t nBatchDims = std::max(lhsRank - 2, rhsRank - 2);
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
// The non-matrix (i.e. batch) dimensions are broadcasted (and thus must be
|
|
|
|
// broadcastable).
|
|
|
|
auto minRank = std::min(lhsRank, rhsRank);
|
|
|
|
auto leadingDims = llvm::to_vector<4>(llvm::seq<int64_t>(0, leadingRank));
|
|
|
|
auto broadcastDims = llvm::to_vector<4>(
|
|
|
|
llvm::seq<int64_t>(leadingRank, minRank + leadingRank));
|
|
|
|
auto lhsShape = lhsRankTy.getShape();
|
|
|
|
auto rhsShape = rhsRankTy.getShape();
|
[Stablehlo] Enhance broadcast pattern in matmul Ops (#3161)
To pass test "MatmulStaticBroadcast_basic" in stablehlo:
```python
class MatmulStaticBroadcast(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([4, 1, 6, 7], torch.float32, True),
([8, 1, 5, 7, 6], torch.float32, True),
])
def forward(self, lhs, rhs):
return torch.matmul(lhs, rhs)
@register_test_case(module_factory=lambda: MatmulStaticBroadcast())
def MatmulStaticBroadcast_basic(module, tu: TestUtils):
module.forward(tu.rand(4, 1, 6, 7), tu.rand(8, 1, 5, 7, 6))
```
2024-04-16 10:10:36 +08:00
|
|
|
|
2022-08-04 10:10:54 +08:00
|
|
|
if (lhsRank < rhsRank) {
|
|
|
|
std::vector<int64_t> newShape(rhsShape.begin(),
|
|
|
|
rhsShape.begin() + leadingRank);
|
|
|
|
newShape.insert(newShape.end(), lhsShape.begin(), lhsShape.end());
|
2023-02-02 21:29:47 +08:00
|
|
|
auto newDimSizes = *hlo::getDimSizesOfTensor(rewriter, op, rhs, leadingDims,
|
|
|
|
dimSizeIndexBits);
|
2022-09-01 10:36:02 +08:00
|
|
|
auto lhsDimSizes =
|
2023-02-02 21:29:47 +08:00
|
|
|
*hlo::getDimSizesOfTensor(rewriter, op, lhs, dimSizeIndexBits);
|
2022-08-04 10:10:54 +08:00
|
|
|
newDimSizes.insert(newDimSizes.end(), lhsDimSizes.begin(),
|
|
|
|
lhsDimSizes.end());
|
|
|
|
lhs = getBroadcastTensor(rewriter, op, lhs, newShape, newDimSizes,
|
|
|
|
broadcastDims);
|
|
|
|
} else {
|
|
|
|
std::vector<int64_t> newShape(lhsShape.begin(),
|
|
|
|
lhsShape.begin() + leadingRank);
|
|
|
|
newShape.insert(newShape.end(), rhsShape.begin(), rhsShape.end());
|
2023-02-02 21:29:47 +08:00
|
|
|
auto newDimSizes = *hlo::getDimSizesOfTensor(rewriter, op, lhs, leadingDims,
|
|
|
|
dimSizeIndexBits);
|
2022-09-01 10:36:02 +08:00
|
|
|
auto rhsDimSizes =
|
2023-02-02 21:29:47 +08:00
|
|
|
*hlo::getDimSizesOfTensor(rewriter, op, rhs, dimSizeIndexBits);
|
2022-08-04 10:10:54 +08:00
|
|
|
newDimSizes.insert(newDimSizes.end(), rhsDimSizes.begin(),
|
|
|
|
rhsDimSizes.end());
|
|
|
|
rhs = getBroadcastTensor(rewriter, op, rhs, newShape, newDimSizes,
|
|
|
|
broadcastDims);
|
|
|
|
}
|
|
|
|
|
[Stablehlo] Enhance broadcast pattern in matmul Ops (#3161)
To pass test "MatmulStaticBroadcast_basic" in stablehlo:
```python
class MatmulStaticBroadcast(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([4, 1, 6, 7], torch.float32, True),
([8, 1, 5, 7, 6], torch.float32, True),
])
def forward(self, lhs, rhs):
return torch.matmul(lhs, rhs)
@register_test_case(module_factory=lambda: MatmulStaticBroadcast())
def MatmulStaticBroadcast_basic(module, tu: TestUtils):
module.forward(tu.rand(4, 1, 6, 7), tu.rand(8, 1, 5, 7, 6))
```
2024-04-16 10:10:36 +08:00
|
|
|
if (lhsRank <= 2 || rhsRank <= 2) {
|
|
|
|
inpLhs = lhs;
|
|
|
|
inpRhs = rhs;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
lhsShape = lhs.getType().cast<RankedTensorType>().getShape();
|
|
|
|
rhsShape = rhs.getType().cast<RankedTensorType>().getShape();
|
|
|
|
|
|
|
|
// check shape compatibility, check if we should broadcast
|
|
|
|
// first, we should got a new batch shape. Check from (0, nBatchDims)
|
|
|
|
SmallVector<int64_t> lhsBroadcastDims;
|
|
|
|
SmallVector<int64_t> rhsBroadcastDims;
|
|
|
|
SmallVector<int64_t> newBatchShape;
|
|
|
|
|
|
|
|
for (int64_t i = 0; i < nBatchDims; i++) {
|
|
|
|
if (lhsShape[i] != rhsShape[i]) {
|
|
|
|
if (lhsShape[i] == 1) {
|
|
|
|
lhsBroadcastDims.push_back(i);
|
|
|
|
newBatchShape.push_back(rhsShape[i]);
|
|
|
|
} else if (rhsShape[i] == 1) {
|
|
|
|
rhsBroadcastDims.push_back(i);
|
|
|
|
newBatchShape.push_back(lhsShape[i]);
|
|
|
|
} else {
|
|
|
|
assert(false && "shape mismatch in matmul op");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
newBatchShape.push_back(lhsShape[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lhsBroadcastDims.empty() && rhsBroadcastDims.empty()) {
|
|
|
|
inpLhs = lhs;
|
|
|
|
inpRhs = rhs;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
auto lhsDimSizes =
|
|
|
|
*hlo::getDimSizesOfTensor(rewriter, op, lhs, dimSizeIndexBits);
|
|
|
|
auto rhsDimSizes =
|
|
|
|
*hlo::getDimSizesOfTensor(rewriter, op, rhs, dimSizeIndexBits);
|
|
|
|
|
|
|
|
if (!lhsBroadcastDims.empty()) {
|
|
|
|
SmallVector<int64_t> lhsNewShape(newBatchShape);
|
|
|
|
lhsNewShape.insert(lhsNewShape.end(), lhsShape.begin() + nBatchDims,
|
|
|
|
lhsShape.end());
|
|
|
|
for (auto i : lhsBroadcastDims) {
|
|
|
|
lhsDimSizes[i] = rhsDimSizes[i];
|
|
|
|
}
|
|
|
|
broadcastDims =
|
|
|
|
llvm::to_vector<4>(llvm::seq<int64_t>(0, lhsNewShape.size()));
|
|
|
|
lhs = getBroadcastTensor(rewriter, op, lhs, lhsNewShape, lhsDimSizes,
|
|
|
|
broadcastDims);
|
|
|
|
}
|
|
|
|
if (!rhsBroadcastDims.empty()) {
|
|
|
|
SmallVector<int64_t> rhsNewShape(newBatchShape);
|
|
|
|
rhsNewShape.insert(rhsNewShape.end(), rhsShape.begin() + nBatchDims,
|
|
|
|
rhsShape.end());
|
|
|
|
for (auto i : rhsBroadcastDims) {
|
|
|
|
rhsDimSizes[i] = lhsDimSizes[i];
|
|
|
|
}
|
|
|
|
broadcastDims =
|
|
|
|
llvm::to_vector<4>(llvm::seq<int64_t>(0, rhsNewShape.size()));
|
|
|
|
rhs = getBroadcastTensor(rewriter, op, rhs, rhsNewShape, rhsDimSizes,
|
|
|
|
broadcastDims);
|
|
|
|
}
|
|
|
|
|
2022-08-04 10:10:54 +08:00
|
|
|
inpLhs = lhs;
|
|
|
|
inpRhs = rhs;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform the basic n-dim matmul operation encompassing the handling of
|
|
|
|
// broadcasting and dynamic shape propagation.
|
|
|
|
// All PyTorch ops that leverage matrix multiplication will derive this and
|
|
|
|
// implement their specialized input processing (e.g transpose), and output
|
|
|
|
// processing, e.g. GEMM or fully connected bias handling.
|
|
|
|
template <typename AtenOpT>
|
2022-09-01 10:36:02 +08:00
|
|
|
class ConvertAtenMatmulBaseOp : public ConvertAtenOp<AtenOpT> {
|
2022-08-04 10:10:54 +08:00
|
|
|
public:
|
2022-09-01 10:36:02 +08:00
|
|
|
using ConvertAtenOp<AtenOpT>::ConvertAtenOp;
|
2022-08-04 10:10:54 +08:00
|
|
|
using OpAdaptor = typename AtenOpT::Adaptor;
|
|
|
|
// Each variant must implement corresponding parameter parsing options.
|
|
|
|
// Maintain separate input read functions for each variant because it is not
|
|
|
|
// necessarily true with all variants that the first two operands are the lhs
|
|
|
|
// and rhs.
|
|
|
|
virtual LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter,
|
|
|
|
Value &lhs, Value &rhs) const {
|
|
|
|
return rewriter.notifyMatchFailure(
|
|
|
|
op,
|
|
|
|
"unimplemented matrix multiplication variant input parsing function");
|
|
|
|
}
|
|
|
|
LogicalResult performMatmul(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter, Value &lhs,
|
|
|
|
Value &rhs, Value &output) const {
|
|
|
|
auto lhsTy = lhs.getType().cast<RankedTensorType>();
|
|
|
|
auto rhsTy = rhs.getType().cast<RankedTensorType>();
|
|
|
|
|
|
|
|
auto lhsRank = lhsTy.getRank();
|
|
|
|
auto rhsRank = rhsTy.getRank();
|
|
|
|
auto lhsElemTy = lhsTy.getElementType();
|
|
|
|
auto rhsElemTy = rhsTy.getElementType();
|
|
|
|
|
|
|
|
if (lhsElemTy != rhsElemTy)
|
|
|
|
return op.emitError("matmul: input datatypes mismatched");
|
|
|
|
if (lhsRank < 1 || rhsRank < 1) {
|
|
|
|
return op.emitError("matmul: inputs can't be 0-rank");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lhsRank <= 2 && rhsRank <= 2) {
|
2023-01-11 07:07:19 +08:00
|
|
|
auto tensorType =
|
|
|
|
ConvertAtenOp<AtenOpT>::getTypeConverter()->convertType(op.getType());
|
2023-02-02 21:29:47 +08:00
|
|
|
output = rewriter.create<stablehlo::DotOp>(op->getLoc(), tensorType, lhs,
|
|
|
|
rhs, nullptr);
|
2022-08-04 10:10:54 +08:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-09-01 10:36:02 +08:00
|
|
|
const auto &options = ConvertAtenOp<AtenOpT>::getOptions();
|
2022-08-04 10:10:54 +08:00
|
|
|
int64_t nBatchDims;
|
|
|
|
if (rhsRank <= 2) {
|
|
|
|
auto leadingRank = lhsRank - 2;
|
2022-09-01 10:36:02 +08:00
|
|
|
getBmmBroadcast(rewriter, op, lhs, rhs, leadingRank,
|
|
|
|
options.dimSizeIndexBits);
|
2022-08-04 10:10:54 +08:00
|
|
|
nBatchDims = leadingRank;
|
|
|
|
} else if (lhsRank <= 2) {
|
|
|
|
auto leadingRank = rhsRank - 2;
|
2022-09-01 10:36:02 +08:00
|
|
|
getBmmBroadcast(rewriter, op, lhs, rhs, leadingRank,
|
|
|
|
options.dimSizeIndexBits);
|
2022-08-04 10:10:54 +08:00
|
|
|
nBatchDims = leadingRank;
|
|
|
|
} else {
|
|
|
|
assert(rhsRank > 2 && lhsRank > 2);
|
|
|
|
auto leadingRank = std::max(lhsRank - rhsRank, rhsRank - lhsRank);
|
|
|
|
nBatchDims = std::max(lhsRank - 2, rhsRank - 2);
|
2022-09-01 10:36:02 +08:00
|
|
|
getBmmBroadcast(rewriter, op, lhs, rhs, leadingRank,
|
|
|
|
options.dimSizeIndexBits);
|
2022-08-04 10:10:54 +08:00
|
|
|
}
|
|
|
|
auto batchDims = llvm::to_vector<4>(llvm::seq<int64_t>(0, nBatchDims));
|
2022-09-23 20:50:29 +08:00
|
|
|
|
|
|
|
auto lhsResultDim = nBatchDims;
|
|
|
|
auto rhsResultDim = nBatchDims + 1;
|
2022-08-04 10:10:54 +08:00
|
|
|
auto lhsContractingDim = nBatchDims + 1;
|
|
|
|
auto rhsContractingDim = nBatchDims;
|
2022-09-23 20:50:29 +08:00
|
|
|
if (lhsRank == 1) {
|
|
|
|
lhsResultDim = nBatchDims + 1;
|
2022-08-04 10:10:54 +08:00
|
|
|
lhsContractingDim = nBatchDims;
|
2022-09-23 20:50:29 +08:00
|
|
|
}
|
2022-08-04 10:10:54 +08:00
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
stablehlo::DotDimensionNumbersAttr dotDimensionNumbers =
|
|
|
|
stablehlo::DotDimensionNumbersAttr::get(
|
2022-08-04 10:10:54 +08:00
|
|
|
rewriter.getContext(),
|
|
|
|
/*lhsBatchingDimensions=*/batchDims,
|
|
|
|
/*rhsBatchingDimensions=*/batchDims,
|
|
|
|
/*lhsContractingDimensions=*/{lhsContractingDim},
|
|
|
|
/*rhsContractingDimensions=*/{rhsContractingDim});
|
2022-09-23 20:50:29 +08:00
|
|
|
auto outTy =
|
|
|
|
castContractingDim(rewriter, op, lhs, rhs, lhsResultDim, rhsResultDim,
|
|
|
|
lhsContractingDim, rhsContractingDim);
|
2022-08-04 10:10:54 +08:00
|
|
|
output = rewriter
|
2023-02-02 21:29:47 +08:00
|
|
|
.create<stablehlo::DotGeneralOp>(op->getLoc(), outTy, lhs, rhs,
|
|
|
|
dotDimensionNumbers, nullptr)
|
2022-08-04 10:10:54 +08:00
|
|
|
.getResult();
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
// The default version just reads two inputs, computes output and returns it.
|
|
|
|
// Other versions may add a bias, apply GEMM-style alpha/beta scaling etc.
|
|
|
|
virtual LogicalResult
|
|
|
|
matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
|
|
|
Value lhs, rhs;
|
|
|
|
if (failed(readMatMulInputs(op, adaptor, rewriter, lhs, rhs)))
|
|
|
|
return op.emitError("failed to read matmul inputs");
|
|
|
|
|
|
|
|
Value output;
|
|
|
|
|
|
|
|
if (failed(performMatmul(op, adaptor, rewriter, lhs, rhs, output)))
|
|
|
|
return op.emitError("failed to perform matmul operation");
|
|
|
|
|
2022-09-23 20:50:29 +08:00
|
|
|
rewriter.replaceOpWithNewOp<tensor::CastOp>(
|
2022-08-04 10:10:54 +08:00
|
|
|
op,
|
2022-09-01 10:36:02 +08:00
|
|
|
ConvertAtenOp<AtenOpT>::getTypeConverter()
|
2022-08-04 10:10:54 +08:00
|
|
|
->convertType(op.getType())
|
|
|
|
.template cast<RankedTensorType>(),
|
|
|
|
output);
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Legalizes the torch.matmul op for general n-dim matmul.
|
|
|
|
template <typename AtenOpT>
|
|
|
|
class ConvertAtenMatMulOp : public ConvertAtenMatmulBaseOp<AtenOpT> {
|
|
|
|
public:
|
|
|
|
using ConvertAtenMatmulBaseOp<AtenOpT>::ConvertAtenMatmulBaseOp;
|
|
|
|
using OpAdaptor = typename AtenOpT::Adaptor;
|
|
|
|
LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter,
|
|
|
|
Value &lhs, Value &rhs) const override {
|
2022-12-08 04:20:41 +08:00
|
|
|
lhs = adaptor.getSelf();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto lhsTy = lhs.getType().cast<RankedTensorType>();
|
|
|
|
|
2022-12-08 04:20:41 +08:00
|
|
|
rhs = adaptor.getOther();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto rhsTy = rhs.getType().cast<RankedTensorType>();
|
|
|
|
|
|
|
|
if (!lhsTy || !rhsTy)
|
|
|
|
return op.emitError(
|
2023-02-02 21:29:47 +08:00
|
|
|
"only ranked tensor types are supported in StableHLO matmul");
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Implements handling of aten.mm and aten.bmm ops.
|
|
|
|
template <typename AtenOpT>
|
|
|
|
class ConvertAtenMmOp : public ConvertAtenMatmulBaseOp<AtenOpT> {
|
|
|
|
public:
|
|
|
|
using ConvertAtenMatmulBaseOp<AtenOpT>::ConvertAtenMatmulBaseOp;
|
|
|
|
using OpAdaptor = typename AtenOpT::Adaptor;
|
|
|
|
LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter,
|
|
|
|
Value &lhs, Value &rhs) const override {
|
2022-12-08 04:20:41 +08:00
|
|
|
lhs = adaptor.getSelf();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto lhsTy = lhs.getType().cast<RankedTensorType>();
|
|
|
|
|
2022-12-08 04:20:41 +08:00
|
|
|
rhs = adaptor.getMat2();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto rhsTy = rhs.getType().cast<RankedTensorType>();
|
|
|
|
|
|
|
|
if (!lhsTy || !rhsTy)
|
|
|
|
return op.emitError(
|
2023-02-02 21:29:47 +08:00
|
|
|
"only ranked tensor types are supported in StableHLO matmul");
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
auto lhsRank = lhsTy.getRank();
|
|
|
|
auto rhsRank = rhsTy.getRank();
|
|
|
|
|
|
|
|
if (isa<AtenMmOp>(op)) {
|
|
|
|
// Mm takes two 2D tensors.
|
|
|
|
if (lhsRank != 2 || rhsRank != 2)
|
|
|
|
return op.emitError("aten.mm called but matrix rank != 2");
|
|
|
|
} else if (isa<AtenBmmOp>(op)) {
|
|
|
|
// Bmm takes two 3D tensors.
|
|
|
|
if (lhsRank != 3 || rhsRank != 3)
|
|
|
|
return op.emitError("aten.bmm called but matrix rank != 3");
|
|
|
|
}
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Implements handling of aten.linear op.
|
|
|
|
template <typename AtenOpT>
|
|
|
|
class ConvertAtenLinearOp : public ConvertAtenMatmulBaseOp<AtenOpT> {
|
|
|
|
public:
|
|
|
|
using ConvertAtenMatmulBaseOp<AtenOpT>::ConvertAtenMatmulBaseOp;
|
|
|
|
using OpAdaptor = typename AtenOpT::Adaptor;
|
|
|
|
LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter,
|
|
|
|
Value &lhs, Value &rhs) const override {
|
2022-12-08 04:20:41 +08:00
|
|
|
lhs = adaptor.getInput();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto lhsTy = lhs.getType().cast<RankedTensorType>();
|
|
|
|
|
2022-12-08 04:20:41 +08:00
|
|
|
rhs = adaptor.getWeight();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto rhsTy = rhs.getType().cast<RankedTensorType>();
|
|
|
|
|
|
|
|
if (!lhsTy || !rhsTy)
|
|
|
|
return op.emitError(
|
2023-02-02 21:29:47 +08:00
|
|
|
"only ranked tensor types are supported in StableHLO matmul");
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
auto lhsRank = lhsTy.getRank();
|
|
|
|
auto rhsRank = rhsTy.getRank();
|
|
|
|
|
2022-12-22 13:24:07 +08:00
|
|
|
if (lhsRank != 2 && lhsRank != 3)
|
|
|
|
return op.emitError("aten.Linear called but input rank not 2 or 3");
|
|
|
|
if (rhsRank != 2 && rhsRank != 3)
|
|
|
|
return op.emitError("aten.Linear called but weight rank not 2 or 3");
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
// Override the default rewriter to perform RHS transpose and bias addition
|
|
|
|
// as well.
|
|
|
|
LogicalResult
|
|
|
|
matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
|
|
|
Value lhs, rhs;
|
|
|
|
|
|
|
|
if (failed(readMatMulInputs(op, adaptor, rewriter, lhs, rhs)))
|
|
|
|
return op.emitError("failed to read matmul inputs");
|
|
|
|
|
|
|
|
// The aten.Linear op has a bias tensor that is added to the matmul
|
|
|
|
// output.
|
2022-12-08 04:20:41 +08:00
|
|
|
auto bias = adaptor.getBias();
|
2022-08-04 10:10:54 +08:00
|
|
|
auto biasTy = bias.getType();
|
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
// StableHLO does not mandate that elementwise op tensors need to be ranked.
|
2022-08-04 10:10:54 +08:00
|
|
|
if (!biasTy.template isa<Torch::NoneType>() &&
|
|
|
|
!biasTy.template isa<RankedTensorType>())
|
2023-02-02 21:29:47 +08:00
|
|
|
return op.emitError("only ranked tensor types are supported in StableHLO "
|
2022-08-04 10:10:54 +08:00
|
|
|
"matmul for bias tensor");
|
|
|
|
|
|
|
|
// weight.T
|
|
|
|
rhs = getPermutedTensor(rewriter, op, rhs, {1, 0});
|
|
|
|
|
|
|
|
auto lhsTy = lhs.getType().cast<RankedTensorType>();
|
|
|
|
auto rhsTy = rhs.getType().cast<RankedTensorType>();
|
2022-12-22 13:24:07 +08:00
|
|
|
auto leadingRank = std::max(lhsTy.getRank() - rhsTy.getRank(),
|
|
|
|
rhsTy.getRank() - lhsTy.getRank());
|
|
|
|
|
|
|
|
const auto &options = ConvertAtenOp<AtenOpT>::getOptions();
|
|
|
|
getBmmBroadcast(rewriter, op, lhs, rhs, leadingRank,
|
|
|
|
options.dimSizeIndexBits);
|
|
|
|
auto resultRank = std::max(lhsTy.getRank(), rhsTy.getRank());
|
|
|
|
auto nBatchDims = resultRank - 2;
|
|
|
|
auto batchDims = llvm::to_vector<4>(llvm::seq<int64_t>(0, nBatchDims));
|
|
|
|
|
|
|
|
auto lhsResultDim = nBatchDims;
|
|
|
|
auto rhsResultDim = nBatchDims + 1;
|
|
|
|
auto lhsContractingDim = nBatchDims + 1;
|
|
|
|
auto rhsContractingDim = nBatchDims;
|
2022-08-04 10:10:54 +08:00
|
|
|
|
2022-09-23 20:50:29 +08:00
|
|
|
auto outTy =
|
2022-12-22 13:24:07 +08:00
|
|
|
castContractingDim(rewriter, op, lhs, rhs, lhsResultDim, rhsResultDim,
|
|
|
|
lhsContractingDim, rhsContractingDim);
|
2023-02-02 21:29:47 +08:00
|
|
|
stablehlo::DotDimensionNumbersAttr dotDimensionNumbers =
|
|
|
|
stablehlo::DotDimensionNumbersAttr::get(
|
2022-12-22 13:24:07 +08:00
|
|
|
rewriter.getContext(),
|
|
|
|
/*lhsBatchingDimensions=*/batchDims,
|
|
|
|
/*rhsBatchingDimensions=*/batchDims,
|
|
|
|
/*lhsContractingDimensions=*/{lhsContractingDim},
|
|
|
|
/*rhsContractingDimensions=*/{rhsContractingDim});
|
2023-02-02 21:29:47 +08:00
|
|
|
Value matmulOutput = rewriter.create<stablehlo::DotGeneralOp>(
|
2022-12-22 13:24:07 +08:00
|
|
|
op->getLoc(), outTy, lhs, rhs, dotDimensionNumbers, nullptr);
|
2022-08-04 10:10:54 +08:00
|
|
|
|
|
|
|
Value matmulPlusBias = matmulOutput;
|
|
|
|
if (!biasTy.template isa<Torch::NoneType>()) {
|
|
|
|
// Bias addition broadcasts to the matmul output shape.
|
2022-09-23 20:50:29 +08:00
|
|
|
matmulPlusBias = rewriter
|
|
|
|
.create<chlo::BroadcastAddOp>(
|
|
|
|
op->getLoc(), outTy, matmulOutput, bias, nullptr)
|
|
|
|
.getResult();
|
2022-08-04 10:10:54 +08:00
|
|
|
}
|
|
|
|
|
2022-12-22 13:24:07 +08:00
|
|
|
auto resultTy =
|
|
|
|
ConvertAtenOp<AtenOpT>::getTypeConverter()->convertType(op.getType());
|
|
|
|
rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultTy, matmulPlusBias);
|
2022-08-04 10:10:54 +08:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2022-09-01 10:36:02 +08:00
|
|
|
class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
|
2022-08-04 15:41:35 +08:00
|
|
|
public:
|
2022-09-01 10:36:02 +08:00
|
|
|
using ConvertAtenOp<AtenConvolutionOp>::ConvertAtenOp;
|
2022-08-04 15:41:35 +08:00
|
|
|
using OpAdaptor = typename AtenConvolutionOp::Adaptor;
|
|
|
|
|
2022-08-09 09:50:07 +08:00
|
|
|
Value reshapeConvWeight(PatternRewriter &rewriter, Operation *op,
|
|
|
|
Value weight, int64_t groups) const {
|
|
|
|
auto weightTy = weight.getType().cast<RankedTensorType>();
|
|
|
|
auto weightElemTy = weightTy.getElementType();
|
|
|
|
auto rank = weightTy.getRank();
|
2022-09-01 10:36:02 +08:00
|
|
|
const auto &options = getOptions();
|
2023-02-02 21:29:47 +08:00
|
|
|
SmallVector<Value> weightShapeVec = *hlo::getDimSizesOfTensor(
|
2022-09-01 10:36:02 +08:00
|
|
|
rewriter, op, weight, options.dimSizeIndexBits);
|
2022-08-09 09:50:07 +08:00
|
|
|
auto weightShape = weightTy.getShape();
|
|
|
|
SmallVector<int64_t> weightShapeInt(rank);
|
|
|
|
std::copy(weightShape.begin(), weightShape.end(), weightShapeInt.begin());
|
|
|
|
|
2023-04-14 02:24:39 +08:00
|
|
|
// 1. [H, W, ..., OC, IC] => [H, W, ..., OC, G, IC//G]
|
2022-08-09 09:50:07 +08:00
|
|
|
Value GValue = rewriter.create<mlir::arith::ConstantOp>(
|
|
|
|
op->getLoc(), rewriter.getI64IntegerAttr(groups));
|
|
|
|
Value ICDivGValue = rewriter.create<mlir::arith::DivSIOp>(
|
2023-04-14 02:24:39 +08:00
|
|
|
op->getLoc(), weightShapeVec[rank - 1], GValue);
|
2022-08-09 09:50:07 +08:00
|
|
|
Value OCMulGValue = rewriter.create<mlir::arith::MulIOp>(
|
2023-04-14 02:24:39 +08:00
|
|
|
op->getLoc(), weightShapeVec[rank - 2], GValue);
|
|
|
|
weightShapeVec[rank - 1] = ICDivGValue;
|
|
|
|
weightShapeVec.insert(weightShapeVec.end() - 1, GValue);
|
2022-08-09 09:50:07 +08:00
|
|
|
|
2023-04-14 02:24:39 +08:00
|
|
|
if (weightShapeInt[rank - 1] == ShapedType::kDynamic) {
|
|
|
|
weightShapeInt.insert(weightShapeInt.end() - 1, groups);
|
2022-08-09 09:50:07 +08:00
|
|
|
} else {
|
2023-04-14 02:24:39 +08:00
|
|
|
weightShapeInt[rank - 1] /= groups;
|
|
|
|
weightShapeInt.insert(weightShapeInt.end() - 1, groups);
|
2022-08-04 15:41:35 +08:00
|
|
|
}
|
2022-08-09 09:50:07 +08:00
|
|
|
Value weightShapeTensor = rewriter.create<mlir::tensor::FromElementsOp>(
|
|
|
|
op->getLoc(), weightShapeVec);
|
2023-02-02 21:29:47 +08:00
|
|
|
weight = rewriter.create<stablehlo::DynamicReshapeOp>(
|
2022-08-09 09:50:07 +08:00
|
|
|
op->getLoc(), RankedTensorType::get(weightShapeInt, weightElemTy),
|
|
|
|
weight, weightShapeTensor);
|
|
|
|
|
2023-04-14 02:24:39 +08:00
|
|
|
// 2. [H, W, ..., OC, G, IC//G] => [H, W, ..., G, OC, IC//G]
|
2022-08-09 09:50:07 +08:00
|
|
|
std::vector<int64_t> transposeDims(rank + 1);
|
|
|
|
for (int64_t i = 0; i <= rank; i++)
|
|
|
|
transposeDims[i] = i;
|
2023-04-14 02:24:39 +08:00
|
|
|
std::swap(transposeDims[rank - 1], transposeDims[rank - 2]);
|
2023-12-08 15:13:42 +08:00
|
|
|
weight = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), weight,
|
|
|
|
transposeDims);
|
2022-08-09 09:50:07 +08:00
|
|
|
|
2023-04-14 02:24:39 +08:00
|
|
|
// 3. [H, W, ..., G, OC, IC//G] => [H, W, ..., G*OC, IC//G]
|
|
|
|
weightShapeInt.erase(weightShapeInt.end() - 2);
|
|
|
|
if (weightShapeInt[weightShapeInt.size() - 2] != ShapedType::kDynamic) {
|
|
|
|
weightShapeInt[weightShapeInt.size() - 2] *= groups;
|
2022-08-04 15:41:35 +08:00
|
|
|
}
|
2023-04-14 02:24:39 +08:00
|
|
|
weightShapeVec.erase(weightShapeVec.end() - 2);
|
|
|
|
weightShapeVec[weightShapeVec.size() - 2] = OCMulGValue;
|
2022-08-09 09:50:07 +08:00
|
|
|
weightShapeTensor = rewriter.create<mlir::tensor::FromElementsOp>(
|
|
|
|
op->getLoc(), weightShapeVec);
|
2023-02-02 21:29:47 +08:00
|
|
|
weight = rewriter.create<stablehlo::DynamicReshapeOp>(
|
2022-08-09 09:50:07 +08:00
|
|
|
op->getLoc(), RankedTensorType::get(weightShapeInt, weightElemTy),
|
|
|
|
weight, weightShapeTensor);
|
|
|
|
return weight;
|
|
|
|
}
|
2022-08-04 15:41:35 +08:00
|
|
|
|
2022-08-09 09:50:07 +08:00
|
|
|
  // Lowers a transposed (gradient/deconvolution-style) aten.convolution to a
  // stablehlo.convolution. A transposed convolution is expressed as a regular
  // convolution with:
  //   * unit window strides,
  //   * lhs (input) dilation set to the torch `stride`,
  //   * padding recomputed as dilation*(kernel-1) - padding (+ output_padding
  //     on the high side),
  //   * the weight transposed and its spatial taps reversed (kernel flip).
  // `outType` is used directly as the convolution result type.
  Value convertTransposedConv(AtenConvolutionOp op,
                              ConversionPatternRewriter &rewriter,
                              RankedTensorType outType, Value input,
                              Value weight, ArrayRef<int64_t> stride,
                              ArrayRef<int64_t> padding,
                              ArrayRef<int64_t> dilation,
                              ArrayRef<int64_t> outputPadding,
                              int64_t groups) const {
    auto inputTy = input.getType().cast<RankedTensorType>();
    auto weightTy = weight.getType().cast<RankedTensorType>();
    auto weightShape = weightTy.getShape();

    auto nDims = inputTy.getRank();
    auto nSpatialDims = nDims - 2;
    auto convOutTy = outType;

    // Transpose weight: move the two leading (feature) dims of
    // [IC, OC/G, K...] to the back and reverse the order of the remaining
    // dims, e.g. 4-D [IC, OC/G, KH, KW] -> [KH, KW, OC/G, IC].
    SmallVector<int64_t> perm(nDims);
    SmallVector<int64_t> transposeShape(nDims);
    for (int i = 0; i < nDims; i++) {
      if (i < 2)
        perm[i] = nDims - 2 + i;
      else
        perm[i] = nDims - i - 1;
      transposeShape[i] = weightShape[perm[i]];
    }
    auto transposeTy =
        RankedTensorType::get(transposeShape, weightTy.getElementType());
    auto transposeOp = rewriter.create<stablehlo::TransposeOp>(
        op->getLoc(), transposeTy, weight, perm);
    // Flip the kernel along its spatial taps, as required when expressing a
    // transposed conv through a forward conv.
    // NOTE(review): only dims {0, 1} are reversed. For 2 spatial dims that is
    // exactly the spatial extent after the transpose above; for 3-D (or
    // higher) convolutions this would not flip every spatial dim — confirm
    // whether >2 spatial dims are supported on this path.
    auto reverseOp = rewriter.create<stablehlo::ReverseOp>(
        op->getLoc(), transposeOp, ArrayRef<int64_t>{0, 1});

    // Prepare for transposed convolution
    // Window strides are all 1; the torch stride becomes lhs dilation below.
    SmallVector<int64_t> stablehloStrideVec(nSpatialDims, 1);
    auto stablehloStride = rewriter.getDenseI64ArrayAttr(stablehloStrideVec);
    // Per spatial dim: low pad = dilation*(k-1) - pad, high pad additionally
    // absorbs output_padding (read back-to-front — presumably to match the
    // reversed spatial order produced by `perm`; verify for >2 spatial dims).
    SmallVector<int64_t> stablehloPaddingVec(nSpatialDims * 2, 0);
    for (int i = 0; i < nSpatialDims; ++i) {
      int64_t padInt = dilation[i] * (weightShape[i + 2] - 1) - padding[i];
      stablehloPaddingVec[i * 2] = padInt;
      stablehloPaddingVec[i * 2 + 1] =
          padInt + outputPadding[outputPadding.size() - i - 1];
    }
    DenseIntElementsAttr stablehloPadding = DenseIntElementsAttr::get(
        RankedTensorType::get({nSpatialDims, 2}, rewriter.getI64Type()),
        stablehloPaddingVec);
    // lhs dilation <- torch stride (this is what "transposes" the conv).
    SmallVector<int64_t> stablehloLhsDilationVec(nSpatialDims);
    std::copy(stride.begin(), stride.end(), stablehloLhsDilationVec.begin());
    auto stablehloLhsDilation =
        rewriter.getDenseI64ArrayAttr(stablehloLhsDilationVec);
    // rhs dilation <- torch dilation, unchanged.
    SmallVector<int64_t> stablehloRhsDilationVec(nSpatialDims);
    std::copy(dilation.begin(), dilation.end(),
              stablehloRhsDilationVec.begin());
    auto stablehloRhsDilation =
        rewriter.getDenseI64ArrayAttr(stablehloRhsDilationVec);

    // Optional attributes left null (defaults).
    DenseBoolArrayAttr windowReversal;
    ArrayAttr precisionConfig;

    // Input/output spatial dims are 2..nDims-1; after the transpose above the
    // kernel's spatial dims are the leading 0..nSpatialDims-1.
    SmallVector<int64_t> spatialDims;
    SmallVector<int64_t> transposedSpatialDims;
    for (int i = 0; i < nSpatialDims; ++i) {
      spatialDims.push_back(i + 2);
      transposedSpatialDims.push_back(i);
    }

    stablehlo::ConvDimensionNumbersAttr dimensionNumbers =
        stablehlo::ConvDimensionNumbersAttr::get(
            /*context=*/rewriter.getContext(), /*inputBatchDimension=*/0,
            /*inputFeatureDimension=*/1,
            /*inputSpatialDimensions=*/spatialDims,
            /*kernelInputFeatureDimension=*/nDims - 1,
            /*kernelOutputFeatureDimension=*/nDims - 2,
            /*kernelSpatialDimensions=*/transposedSpatialDims,
            /*outputBatchDimension=*/0, /*outputFeatureDimension=*/1,
            /*outputSpatialDimensions=*/spatialDims);

    // Grouped convolution needs the weight reshaped so the group dimension
    // folds into the output-feature dimension (see reshapeConvWeight).
    Value weightInput = reverseOp.getResult();
    if (groups != 1) {
      weightInput = reshapeConvWeight(rewriter, op, reverseOp, groups);
    }

    // Create transposed convolution
    auto transposedConvOp = rewriter.create<stablehlo::ConvolutionOp>(
        op->getLoc(), convOutTy, input, weightInput, stablehloStride,
        stablehloPadding, stablehloLhsDilation, stablehloRhsDilation,
        windowReversal, dimensionNumbers, static_cast<uint64_t>(groups), 1,
        precisionConfig);
    return transposedConvOp.getResult();
  }
|
2022-08-04 15:41:35 +08:00
|
|
|
|
2022-08-09 09:50:07 +08:00
|
|
|
Value convertNormalConv(AtenConvolutionOp op,
|
|
|
|
ConversionPatternRewriter &rewriter,
|
|
|
|
RankedTensorType outType, Value input, Value weight,
|
|
|
|
ArrayRef<int64_t> stride, ArrayRef<int64_t> padding,
|
|
|
|
ArrayRef<int64_t> dilation, int64_t groups) const {
|
|
|
|
int64_t nDims = outType.getRank();
|
2022-08-04 15:41:35 +08:00
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
// Get stablehlo::ConvolutionOp attributes
|
Bump stablehlo to openxla/stablehlo@fd52182f76cadb82f2064fe5fc49a4fb4347a826 (#2821)
With the recent LLVM integrate and changes from
https://github.com/llvm/llvm-project/pull/78260, we hit this build error
in Stablehlo (which is quite old).
```
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1020:14: error: no member named 'startRootUpdate' in 'mlir::PatternRewriter'
rewriter.startRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1026:16: error: no member named 'finalizeRootUpdate' in 'mlir::PatternRewriter'
rewriter.finalizeRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1029:16: error: no member named 'cancelRootUpdate' in 'mlir::PatternRewriter'
rewriter.cancelRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1108:14: error: no member named 'updateRootInPlace' in 'mlir::PatternRewriter'
rewriter.updateRootInPlace(op->getParentOp(), [&]() { return; });
~~~~~~~~ ^
4 errors generated.
Target @torch-mlir//:torch-mlir-opt failed to build
```
I'm still puzzled as to how this didn't fail with the CMake merge gating
CI (do we not test Stablehlo builds/tests?). In any case, bumping our
submodule to https://github.com/openxla/stablehlo/pull/1918 fixes it.
It exposes a new failing lit test in TorchToStablehlo though, that I
have looped stablehlo developers into
([here](https://discord.com/channels/999073994483433573/999074539138990131/1201235845391331419)).
```
bazel run @torch-mlir//test/Conversion:TorchToStablehlo/scatter.mlir.test
...external/torch-mlir/test/Conversion/TorchToStablehlo/scatter.mlir
within split at <stdin>:1 offset :33:8: error: unexpected error: Expects non-empty reduction block for type inference
%0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64>
^
LLVM ERROR: Failed to infer result type(s).
```
Bazel CI:
https://github.com/sjain-stanford/torch-mlir/actions/runs/7732673480/job/21083102228
2024-02-01 06:21:17 +08:00
|
|
|
auto stablehloWindowStride = rewriter.getDenseI64ArrayAttr(stride);
|
2023-02-02 21:29:47 +08:00
|
|
|
std::vector<int64_t> stablehloPaddingVec;
|
2022-08-04 15:41:35 +08:00
|
|
|
for (size_t i = 0; i < padding.size(); i++) {
|
2023-02-02 21:29:47 +08:00
|
|
|
stablehloPaddingVec.emplace_back(padding[i]);
|
|
|
|
stablehloPaddingVec.emplace_back(padding[i]);
|
2022-08-04 15:41:35 +08:00
|
|
|
}
|
2023-02-02 21:29:47 +08:00
|
|
|
DenseIntElementsAttr stablehloPadding = DenseIntElementsAttr::get(
|
2022-08-04 15:41:35 +08:00
|
|
|
RankedTensorType::get(
|
|
|
|
{static_cast<long int>(padding.size()), static_cast<long int>(2)},
|
|
|
|
rewriter.getI64Type()),
|
2023-02-02 21:29:47 +08:00
|
|
|
stablehloPaddingVec);
|
Bump stablehlo to openxla/stablehlo@fd52182f76cadb82f2064fe5fc49a4fb4347a826 (#2821)
With the recent LLVM integrate and changes from
https://github.com/llvm/llvm-project/pull/78260, we hit this build error
in Stablehlo (which is quite old).
```
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1020:14: error: no member named 'startRootUpdate' in 'mlir::PatternRewriter'
rewriter.startRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1026:16: error: no member named 'finalizeRootUpdate' in 'mlir::PatternRewriter'
rewriter.finalizeRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1029:16: error: no member named 'cancelRootUpdate' in 'mlir::PatternRewriter'
rewriter.cancelRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1108:14: error: no member named 'updateRootInPlace' in 'mlir::PatternRewriter'
rewriter.updateRootInPlace(op->getParentOp(), [&]() { return; });
~~~~~~~~ ^
4 errors generated.
Target @torch-mlir//:torch-mlir-opt failed to build
```
I'm still puzzled as to how this didn't fail with the CMake merge gating
CI (do we not test Stablehlo builds/tests?). In any case, bumping our
submodule to https://github.com/openxla/stablehlo/pull/1918 fixes it.
It exposes a new failing lit test in TorchToStablehlo though, that I
have looped stablehlo developers into
([here](https://discord.com/channels/999073994483433573/999074539138990131/1201235845391331419)).
```
bazel run @torch-mlir//test/Conversion:TorchToStablehlo/scatter.mlir.test
...external/torch-mlir/test/Conversion/TorchToStablehlo/scatter.mlir
within split at <stdin>:1 offset :33:8: error: unexpected error: Expects non-empty reduction block for type inference
%0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64>
^
LLVM ERROR: Failed to infer result type(s).
```
Bazel CI:
https://github.com/sjain-stanford/torch-mlir/actions/runs/7732673480/job/21083102228
2024-02-01 06:21:17 +08:00
|
|
|
auto stablehloRhsDilation = rewriter.getDenseI64ArrayAttr(dilation);
|
2022-08-04 15:41:35 +08:00
|
|
|
SmallVector<int64_t> spatialDimensions;
|
2022-08-09 09:50:07 +08:00
|
|
|
for (int64_t i = 2; i < nDims; i++) {
|
2022-08-04 15:41:35 +08:00
|
|
|
spatialDimensions.emplace_back(i);
|
|
|
|
}
|
2023-02-02 21:29:47 +08:00
|
|
|
stablehlo::ConvDimensionNumbersAttr dimensionNumbers =
|
|
|
|
stablehlo::ConvDimensionNumbersAttr::get(
|
2022-08-04 15:41:35 +08:00
|
|
|
/*context=*/rewriter.getContext(), /*inputBatchDimension=*/0,
|
|
|
|
/*inputFeatureDimension=*/1,
|
|
|
|
/*inputSpatialDimensions=*/spatialDimensions,
|
|
|
|
/*kernelInputFeatureDimension=*/1,
|
|
|
|
/*kernelOutputFeatureDimension=*/0,
|
|
|
|
/*kernelSpatialDimensions=*/spatialDimensions,
|
|
|
|
/*outputBatchDimension=*/0, /*outputFeatureDimension=*/1,
|
|
|
|
/*outputSpatialDimensions=*/spatialDimensions);
|
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
// stablehlo::ConvolutionOp's optional attributes, leave them as default
|
Bump stablehlo to openxla/stablehlo@fd52182f76cadb82f2064fe5fc49a4fb4347a826 (#2821)
With the recent LLVM integrate and changes from
https://github.com/llvm/llvm-project/pull/78260, we hit this build error
in Stablehlo (which is quite old).
```
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1020:14: error: no member named 'startRootUpdate' in 'mlir::PatternRewriter'
rewriter.startRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1026:16: error: no member named 'finalizeRootUpdate' in 'mlir::PatternRewriter'
rewriter.finalizeRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1029:16: error: no member named 'cancelRootUpdate' in 'mlir::PatternRewriter'
rewriter.cancelRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1108:14: error: no member named 'updateRootInPlace' in 'mlir::PatternRewriter'
rewriter.updateRootInPlace(op->getParentOp(), [&]() { return; });
~~~~~~~~ ^
4 errors generated.
Target @torch-mlir//:torch-mlir-opt failed to build
```
I'm still puzzled as to how this didn't fail with the CMake merge gating
CI (do we not test Stablehlo builds/tests?). In any case, bumping our
submodule to https://github.com/openxla/stablehlo/pull/1918 fixes it.
It exposes a new failing lit test in TorchToStablehlo though, that I
have looped stablehlo developers into
([here](https://discord.com/channels/999073994483433573/999074539138990131/1201235845391331419)).
```
bazel run @torch-mlir//test/Conversion:TorchToStablehlo/scatter.mlir.test
...external/torch-mlir/test/Conversion/TorchToStablehlo/scatter.mlir
within split at <stdin>:1 offset :33:8: error: unexpected error: Expects non-empty reduction block for type inference
%0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64>
^
LLVM ERROR: Failed to infer result type(s).
```
Bazel CI:
https://github.com/sjain-stanford/torch-mlir/actions/runs/7732673480/job/21083102228
2024-02-01 06:21:17 +08:00
|
|
|
DenseI64ArrayAttr stablehloLhsDilation;
|
|
|
|
DenseBoolArrayAttr windowReversal;
|
2022-08-04 15:41:35 +08:00
|
|
|
ArrayAttr precisionConfig;
|
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
auto stablehloConvOp = rewriter.create<stablehlo::ConvolutionOp>(
|
|
|
|
op->getLoc(), outType, input, weight, stablehloWindowStride,
|
|
|
|
stablehloPadding, stablehloLhsDilation, stablehloRhsDilation,
|
|
|
|
windowReversal, dimensionNumbers, static_cast<uint64_t>(groups), 1,
|
|
|
|
precisionConfig);
|
2022-08-09 09:50:07 +08:00
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
return stablehloConvOp.getResult();
|
2022-08-09 09:50:07 +08:00
|
|
|
}
|
|
|
|
|
2022-09-08 03:35:14 +08:00
|
|
|
LogicalResult
|
|
|
|
matchAndRewrite(AtenConvolutionOp op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
2022-12-08 04:20:41 +08:00
|
|
|
Value input = adaptor.getInput();
|
|
|
|
Value weight = adaptor.getWeight();
|
2022-08-09 09:50:07 +08:00
|
|
|
|
|
|
|
// The input shape is [N, C, H, W]
|
|
|
|
auto inputTy = input.getType().template cast<RankedTensorType>();
|
|
|
|
// The weight shape is [OC, (IC//G), KH, KW]
|
|
|
|
// If transposed is set to true,
|
|
|
|
// the weight shape changes to [IC, (OC//G), KH, KW]
|
|
|
|
auto weightTy = weight.getType().template cast<RankedTensorType>();
|
|
|
|
auto outTy = getTypeConverter()
|
|
|
|
->convertType(op.getType())
|
|
|
|
.template cast<RankedTensorType>();
|
|
|
|
if (!inputTy || !weightTy || !outTy) {
|
|
|
|
return op.emitError("input, weight and output must be ranked tensors");
|
|
|
|
}
|
|
|
|
if (inputTy.getRank() < 3)
|
|
|
|
return op.emitError("only input with at least 3 dims valid");
|
|
|
|
SmallVector<int64_t> stride;
|
2022-12-08 04:20:41 +08:00
|
|
|
if (!matchPattern(op.getStride(), m_TorchListOfConstantInts(stride))) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(op,
|
|
|
|
"non-const stride list unsupported");
|
|
|
|
}
|
|
|
|
SmallVector<int64_t> padding;
|
2022-12-08 04:20:41 +08:00
|
|
|
if (!matchPattern(op.getPadding(), m_TorchListOfConstantInts(padding))) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(op,
|
|
|
|
"non-const padding list unsupported");
|
|
|
|
}
|
|
|
|
SmallVector<int64_t> dilation;
|
2022-12-08 04:20:41 +08:00
|
|
|
if (!matchPattern(op.getDilation(), m_TorchListOfConstantInts(dilation))) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(op,
|
|
|
|
"non-const dilation list unsupported");
|
|
|
|
}
|
|
|
|
SmallVector<int64_t> outputPadding;
|
2022-12-08 04:20:41 +08:00
|
|
|
if (!matchPattern(op.getOutputPadding(),
|
2022-11-17 04:33:12 +08:00
|
|
|
m_TorchListOfConstantInts(outputPadding))) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(
|
|
|
|
op, "non-const output_padding list unsupported");
|
|
|
|
}
|
|
|
|
int64_t groups;
|
2022-12-08 04:20:41 +08:00
|
|
|
if (!matchPattern(op.getGroups(), m_TorchConstantInt(&groups))) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(op, "non-int groups unsupported");
|
|
|
|
}
|
|
|
|
bool transposed;
|
2022-12-08 04:20:41 +08:00
|
|
|
if (!matchPattern(op.getTransposed(), m_TorchConstantBool(&transposed))) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(op, "non-bool transposed unsupported");
|
|
|
|
}
|
|
|
|
// Whether need to handle outputpadding
|
|
|
|
bool needHandleOutputPadding = false;
|
|
|
|
for (int64_t i : outputPadding) {
|
|
|
|
if (i != 0) {
|
|
|
|
needHandleOutputPadding = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Op validation check
|
|
|
|
if (needHandleOutputPadding && !transposed) {
|
|
|
|
return op->emitError(
|
|
|
|
"output padding attr is valid only in transposed convolution");
|
|
|
|
}
|
|
|
|
assert(padding.size() == dilation.size() &&
|
|
|
|
padding.size() == stride.size() &&
|
|
|
|
padding.size() == static_cast<size_t>(inputTy.getRank()) - 2 &&
|
|
|
|
inputTy.getRank() == weightTy.getRank());
|
|
|
|
|
|
|
|
auto nSpatialDims = padding.size();
|
|
|
|
auto nDims = inputTy.getRank();
|
2022-12-02 12:38:28 +08:00
|
|
|
|
2022-08-09 09:50:07 +08:00
|
|
|
// Kernel size must be constant.
|
|
|
|
auto weightShape = weightTy.getShape();
|
|
|
|
for (int i = 2; i < nDims; ++i) {
|
2022-12-02 12:38:28 +08:00
|
|
|
if (weightShape[i] == ShapedType::kDynamic) {
|
2022-08-09 09:50:07 +08:00
|
|
|
return rewriter.notifyMatchFailure(
|
|
|
|
op, "only constant kernel size is supported");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
Value stablehloConvResult;
|
2022-08-09 09:50:07 +08:00
|
|
|
if (transposed) {
|
2023-04-14 02:24:39 +08:00
|
|
|
stablehloConvResult =
|
|
|
|
convertTransposedConv(op, rewriter, outTy, input, weight, stride,
|
|
|
|
padding, dilation, outputPadding, groups);
|
2022-08-09 09:50:07 +08:00
|
|
|
} else {
|
2023-02-02 21:29:47 +08:00
|
|
|
stablehloConvResult =
|
|
|
|
convertNormalConv(op, rewriter, outTy, input, weight, stride, padding,
|
|
|
|
dilation, groups);
|
2022-08-09 09:50:07 +08:00
|
|
|
}
|
2022-08-04 15:41:35 +08:00
|
|
|
|
2022-12-08 04:20:41 +08:00
|
|
|
auto bias = adaptor.getBias();
|
2022-08-04 15:41:35 +08:00
|
|
|
|
|
|
|
// No bias provided
|
2022-12-08 04:20:41 +08:00
|
|
|
if (failed(checkNotNone(rewriter, op, op.getBias()))) {
|
2023-02-02 21:29:47 +08:00
|
|
|
rewriter.replaceOp(op, stablehloConvResult);
|
2022-08-04 15:41:35 +08:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle bias
|
|
|
|
if (!bias.getType().cast<RankedTensorType>()) {
|
|
|
|
return op.emitError("bias provided but not a ranked tensor");
|
|
|
|
}
|
|
|
|
|
2022-12-08 04:20:41 +08:00
|
|
|
auto biasTy = bias.getType().cast<RankedTensorType>();
|
2022-08-04 15:41:35 +08:00
|
|
|
if (!biasTy.getElementType().isIntOrFloat()) {
|
|
|
|
return op.emitError("only floating-point or integer datatype "
|
|
|
|
"legalization for bias supported");
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(biasTy.getRank() <= 1);
|
|
|
|
|
|
|
|
// Reshape and promote bias
|
|
|
|
auto inputUnsqzDims =
|
|
|
|
llvm::to_vector<4>(llvm::seq<int64_t>(-nSpatialDims, 0));
|
2022-09-01 10:36:02 +08:00
|
|
|
|
|
|
|
const auto &options = getOptions();
|
2023-02-02 21:29:47 +08:00
|
|
|
bias = *hlo::unsqueezeTensor(rewriter, op, bias, inputUnsqzDims,
|
|
|
|
options.dimSizeIndexBits);
|
2023-06-26 00:04:17 +08:00
|
|
|
bias = hlo::promoteType(rewriter, op.getLoc(), bias, outTy);
|
2022-08-04 15:41:35 +08:00
|
|
|
|
Bump stablehlo to openxla/stablehlo@fd52182f76cadb82f2064fe5fc49a4fb4347a826 (#2821)
With the recent LLVM integrate and changes from
https://github.com/llvm/llvm-project/pull/78260, we hit this build error
in Stablehlo (which is quite old).
```
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1020:14: error: no member named 'startRootUpdate' in 'mlir::PatternRewriter'
rewriter.startRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1026:16: error: no member named 'finalizeRootUpdate' in 'mlir::PatternRewriter'
rewriter.finalizeRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1029:16: error: no member named 'cancelRootUpdate' in 'mlir::PatternRewriter'
rewriter.cancelRootUpdate(op);
~~~~~~~~ ^
external/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp:1108:14: error: no member named 'updateRootInPlace' in 'mlir::PatternRewriter'
rewriter.updateRootInPlace(op->getParentOp(), [&]() { return; });
~~~~~~~~ ^
4 errors generated.
Target @torch-mlir//:torch-mlir-opt failed to build
```
I'm still puzzled as to how this didn't fail with the CMake merge gating
CI (do we not test Stablehlo builds/tests?). In any case, bumping our
submodule to https://github.com/openxla/stablehlo/pull/1918 fixes it.
It exposes a new failing lit test in TorchToStablehlo though, that I
have looped stablehlo developers into
([here](https://discord.com/channels/999073994483433573/999074539138990131/1201235845391331419)).
```
bazel run @torch-mlir//test/Conversion:TorchToStablehlo/scatter.mlir.test
...external/torch-mlir/test/Conversion/TorchToStablehlo/scatter.mlir
within split at <stdin>:1 offset :33:8: error: unexpected error: Expects non-empty reduction block for type inference
%0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64>
^
LLVM ERROR: Failed to infer result type(s).
```
Bazel CI:
https://github.com/sjain-stanford/torch-mlir/actions/runs/7732673480/job/21083102228
2024-02-01 06:21:17 +08:00
|
|
|
DenseI64ArrayAttr bcastDimensions;
|
2023-02-02 21:29:47 +08:00
|
|
|
rewriter.replaceOpWithNewOp<chlo::BroadcastAddOp>(
|
|
|
|
op, outTy, stablehloConvResult, bias, bcastDimensions);
|
2022-08-04 15:41:35 +08:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
2023-02-02 21:29:47 +08:00
|
|
|
void mlir::torch::torch_to_stablehlo::populateLinearOpPatternsAndLegality(
|
2022-08-04 10:10:54 +08:00
|
|
|
TypeConverter &typeConverter, RewritePatternSet &patterns,
|
2023-02-02 21:29:47 +08:00
|
|
|
ConversionTarget &target, const TorchToStablehloOptions &options) {
|
2022-08-04 10:10:54 +08:00
|
|
|
MLIRContext *context = patterns.getContext();
|
|
|
|
|
|
|
|
#define INSERT_MATMUL_ATENOP_PATTERN(AtenOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
2022-09-01 10:36:02 +08:00
|
|
|
patterns.add<ConvertAtenMatMulOp<AtenOp>>(typeConverter, context, options)
|
2022-08-04 10:10:54 +08:00
|
|
|
INSERT_MATMUL_ATENOP_PATTERN(AtenMatmulOp);
|
|
|
|
#undef INSERT_MATMUL_ATEMOP_PATTERN
|
|
|
|
|
|
|
|
#define INSERT_MM_ATENOP_PATTERN(AtenOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
2022-09-01 10:36:02 +08:00
|
|
|
patterns.add<ConvertAtenMmOp<AtenOp>>(typeConverter, context, options)
|
2022-08-04 10:10:54 +08:00
|
|
|
INSERT_MM_ATENOP_PATTERN(AtenMmOp);
|
|
|
|
INSERT_MM_ATENOP_PATTERN(AtenBmmOp);
|
|
|
|
#undef INSERT_MM_ATEMOP_PATTERN
|
|
|
|
|
|
|
|
#define INSERT_LINEAR_ATENOP_PATTERN(AtenOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
2022-09-01 10:36:02 +08:00
|
|
|
patterns.add<ConvertAtenLinearOp<AtenOp>>(typeConverter, context, options)
|
2022-08-04 10:10:54 +08:00
|
|
|
INSERT_LINEAR_ATENOP_PATTERN(AtenLinearOp);
|
|
|
|
#undef INSERT_LINEAR_ATEMOP_PATTERN
|
2022-08-04 15:41:35 +08:00
|
|
|
|
|
|
|
#define INSERT_CONVOLUTION_ATENOP_PATTERN(AtenOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
2022-09-01 10:36:02 +08:00
|
|
|
patterns.add<ConvertAtenConvolutionOp>(typeConverter, context, options)
|
2022-08-04 15:41:35 +08:00
|
|
|
INSERT_CONVOLUTION_ATENOP_PATTERN(AtenConvolutionOp);
|
|
|
|
#undef INSERT_CONVOLUTION_ATENOP_PATTERN
|
2022-08-04 10:10:54 +08:00
|
|
|
}
|