//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "torch-mlir/Conversion/TorchToTosa/TorchToTosa.h"
|
2021-12-03 08:52:01 +08:00
|
|
|
#include "torch-mlir/Conversion/TorchToTosa/TosaLegalizeCommon.h"
|
|
|
|
#include "torch-mlir/Conversion/TorchToTosa/TosaLegalizeUtils.h"
|
2021-10-08 10:07:03 +08:00
|
|
|
|
|
|
|
#include "../PassDetail.h"
|
2022-10-05 21:28:06 +08:00
|
|
|
#include "mlir/Dialect/Arith/IR/Arith.h"
|
2021-12-16 03:01:01 +08:00
|
|
|
#include "mlir/Dialect/Tensor/IR/Tensor.h"
|
2021-10-08 10:07:03 +08:00
|
|
|
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
|
|
|
|
#include "mlir/Dialect/Traits.h"
|
|
|
|
#include "mlir/IR/Matchers.h"
|
|
|
|
#include "mlir/Transforms/DialectConversion.h"
|
|
|
|
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
|
2022-02-01 05:34:09 +08:00
|
|
|
#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
|
2021-10-08 10:07:03 +08:00
|
|
|
#include "torch-mlir/Dialect/TorchConversion/IR/TorchConversionDialect.h"
|
|
|
|
#include "torch-mlir/Dialect/TorchConversion/Transforms/BackendTypeConversion.h"
|
|
|
|
|
|
|
|
using namespace mlir;
|
|
|
|
using namespace mlir::torch;
|
|
|
|
using namespace mlir::torch::Torch;
|
|
|
|
|
|
|
|
namespace {

// These legalizations are for unary ops with only floating-point datatypes.
// There is no supported quantized integer mode for these.
template <typename AtenOpT, typename TosaOpT>
class ConvertAtenUnaryFPOnlyOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value self = adaptor.self();
    auto selfTy = self.getType().cast<TensorType>();

    if (!selfTy)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    if (selfTy.getElementType().isa<mlir::FloatType>()) {
      rewriter.replaceOpWithNewOp<TosaOpT>(
          op,
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              op.getType()),
          self);
      return success();
    } else {
      return rewriter.notifyMatchFailure(
          op, "Only floating-point datatype legalization supported");
    }
  }
};

// These unary op legalizations are identical for floating-point
// or quantized types
template <typename AtenOpT, typename TosaOpT>
class ConvertAtenUnaryOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<TosaOpT>(
        op,
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            op.getType()),
        adaptor.self());
    return success();
  }
};

// These binary op legalizations are identical for floating-point
// or quantized types
template <typename AtenOpT, typename TosaOpT>
class ConvertAtenBinaryOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value lhs = adaptor.self();
    auto lhsTy = lhs.getType().cast<TensorType>();
    Value rhs = adaptor.other();
    auto rhsTy = rhs.getType().cast<TensorType>();

    if (!lhsTy || !rhsTy)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    auto lhsElemTy = lhsTy.getElementType();
    auto rhsElemTy = rhsTy.getElementType();

    if (lhsElemTy != rhsElemTy)
      return rewriter.notifyMatchFailure(op, "Input datatypes mismatched");

    rewriter.replaceOpWithNewOp<TosaOpT>(
        op,
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            op.getType()),
        lhs, rhs);
    return success();
  }
};

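// Returns true when the extracted scalar constant is representable in the
// destination type T: an exact round-trip for floats, a range check for ints.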
template <typename T>
static bool isInValidRange(bool isFloat, const double &doubleValue, bool isInt,
                           const int64_t &intValue) {
  if (isFloat) {
    // Do a round-trip check here instead of numeric limits due to
    // compiler warnings around double <-> int conversion.
    return (doubleValue == static_cast<double>(static_cast<T>(doubleValue)));
  } else {
    assert(isInt);
    return (intValue >= std::numeric_limits<T>::min()) &&
           (intValue <= std::numeric_limits<T>::max());
  }
  return true;
}

// FIXME: This will eventually go into a Tosa*Utils file.
LogicalResult torchScalarToTosaTensor(ConversionPatternRewriter &rewriter,
                                      Operation *op, Value torchScalarValue,
                                      Value &tosaTensor, Type dtype,
                                      llvm::ArrayRef<int64_t> dshape) {
  // Retrieve a const float or int value but create the out Tensor with dtype.
  double doubleValue;
  auto isFloat =
      matchPattern(torchScalarValue, m_TorchConstantFloat(&doubleValue));

  int64_t intValue;
  auto isInt = matchPattern(torchScalarValue, m_TorchConstantInt(&intValue));

  if (!isFloat && !isInt)
    return rewriter.notifyMatchFailure(op,
                                       "Unable to extract the scalar constant");

  if (dtype.isa<mlir::FloatType>()) {
    tosaTensor = tosa::getConstTensor<float>(
                     rewriter, op, (isFloat ? doubleValue : intValue), dshape)
                     .value();
  } else if (auto intType = dtype.dyn_cast<mlir::IntegerType>()) {
    auto w = intType.getWidth();
    if (w != 32 && w != 64)
      return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
        diag << "Unsupported integer type: " << intType;
      });

    if (w == 32) {
      if (!isInValidRange<int32_t>(isFloat, doubleValue, isInt, intValue)) {
        return rewriter.notifyMatchFailure(
            op, "Supplied value of scalar constant exceeds limits "
                "of destination type");
      }
      int32_t d = isFloat ? static_cast<int32_t>(doubleValue)
                          : static_cast<int32_t>(intValue);
      tosaTensor =
          tosa::getConstTensor<int32_t>(rewriter, op, {d}, dshape).value();
    } else if (w == 64) {
      if (!isInValidRange<int64_t>(isFloat, doubleValue, isInt, intValue)) {
        return rewriter.notifyMatchFailure(
            op, "Supplied value of scalar constant exceeds limits "
                "of destination type");
      }
      int64_t d = (isFloat ? static_cast<int64_t>(doubleValue) : intValue);
      tosaTensor =
          tosa::getConstTensor<int64_t>(rewriter, op, {d}, dshape).value();
    }
  } else {
    return rewriter.notifyMatchFailure(op, "Unsupported element type");
  }

  return success();
}

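// Converts the `alpha` scalar of add/sub-style ops into a TOSA constant
// tensor, optionally requiring an integer alpha to be exactly 1.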
LogicalResult torchAlphaToTosaTensor(ConversionPatternRewriter &rewriter,
                                     Operation *op, Value alphaScalar,
                                     Value &alphaTensor, Type dtype,
                                     bool checkForUnity) {
  if (succeeded(torchScalarToTosaTensor(rewriter, op, alphaScalar, alphaTensor,
                                        dtype, {})))
    return success();

  // `alpha` has not been specified.
  int64_t alphaValue;
  if (!matchPattern(alphaScalar, m_TorchConstantInt(&alphaValue)))
    return rewriter.notifyMatchFailure(
        op, "Currently only scalar constants are supported for "
            "alpha in TOSA operation");
  // When no alpha has been specified, this must be 1.
  if (checkForUnity && alphaValue != 1)
    return rewriter.notifyMatchFailure(op,
                                       "Unsupported integer value for alpha");

  alphaTensor =
      mlir::tosa::getTosaConstTensorSingleF32(rewriter, op, alphaValue);

  return success();
}

// These binary op legalizations are specific to add/sub which have an
// alpha multiplier.
template <typename AtenOpT, typename TosaOpT>
class ConvertAtenAddSubOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value lhs = adaptor.self();
    auto lhsType = lhs.getType().dyn_cast<TensorType>();
    Value rhs = adaptor.other();
    auto rhsType = rhs.getType().dyn_cast<TensorType>();

    if (!lhsType)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    if (auto lhsElemTy = lhsType.getElementType().dyn_cast<IntegerType>()) {
      if (lhsElemTy.getWidth() > 32)
        return rewriter.notifyMatchFailure(
            op, "Integers with widths greater than 32 are not supported");
    }

    auto outType = OpConversionPattern<AtenOpT>::getTypeConverter()
                       ->convertType(op.getType())
                       .template cast<TensorType>();

    Type outElemTy = outType.getElementType();
    if (!outElemTy.isIntOrFloat()) {
      return rewriter.notifyMatchFailure(
          op, "Only floating-point or integer datatype legalization supported");
    }

    Value rhsAsTensor;
    if (!rhsType) {
      if (failed(torchScalarToTosaTensor(rewriter, op, op.other(), rhsAsTensor,
                                         outElemTy, {})))
        return rewriter.notifyMatchFailure(
            op, "Currently only scalar constants are supported for "
                "conversion in TOSA operation");
    }
    auto rhsTensor = rhsType ? rhs : rhsAsTensor;

    // Handle alpha.
    Value alphaTensor;
    if (failed(torchAlphaToTosaTensor(rewriter, op.getOperation(), op.alpha(),
                                      alphaTensor, outElemTy,
                                      /*checkForUnity=*/false))) {
      return rewriter.notifyMatchFailure(
          op, "Currently only scalar constants are supported for "
              "alpha in conversion to TOSA operation");
    }

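    // Multiply the RHS by alpha first so the final add/sub computes
    // lhs +/- (alpha * other).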
    auto multTensor = rewriter.create<tosa::MulOp>(
        op.getLoc(), rhsType ? rhsType : RankedTensorType::get({}, outElemTy),
        rhsTensor, alphaTensor, /*shift=*/0);

    if (outElemTy.isa<mlir::FloatType>()) {
      if (lhsType.getElementType() != outElemTy)
        lhs = rewriter.create<tosa::CastOp>(op.getLoc(), outType, lhs);

      rewriter.replaceOpWithNewOp<TosaOpT>(op, outType, lhs, multTensor);

      return success();
    } else {
      return rewriter.notifyMatchFailure(
          op, "Only floating-point datatype legalization supported");
    }
  }
};

// Binary op legalizations for comparator ops.
template <typename AtenOpT, typename TosaOpT>
class ConvertAtenCompareOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value lhs = adaptor.self();
    auto lhsTy = lhs.getType().dyn_cast<TensorType>();
    Value rhs = adaptor.other();
    auto rhsTy = rhs.getType().dyn_cast<TensorType>();

    if (!lhsTy)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    auto lhsElemTy = lhsTy.getElementType();
    if (!lhsElemTy.isIntOrFloat())
      return rewriter.notifyMatchFailure(
          op, "Only floating-point or integer datatype legalization supported");

    // For bitwise operators, only integer datatype legalization is supported
    if (lhsElemTy.isa<mlir::FloatType>() &&
        std::is_same<AtenOpT, AtenBitwiseAndTensorOp>()) {
      return rewriter.notifyMatchFailure(op,
                                         "For bitwise operators, only integer "
                                         "datatype legalization is supported");
    }

    Value rhsAsTensor;
    if (!rhsTy) {
      if (failed(torchScalarToTosaTensor(rewriter, op, op.other(), rhsAsTensor,
                                         lhsElemTy, {})))
        return rewriter.notifyMatchFailure(
            op, "Currently only scalar constants are supported for "
                "conversion in TOSA operation");
    }
    auto rhsTensor = rhsTy ? rhs : rhsAsTensor;
    // There is no Lesser operator in TOSA, so lt variants swap their operands
    // and emit the Greater form instead.
    auto swapLhsRhs = (std::is_same<AtenOpT, AtenLtTensorOp>() ||
                       std::is_same<AtenOpT, AtenLtScalarOp>());

    auto resultOp = rewriter.create<TosaOpT>(
        op.getLoc(),
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            op.getType()),
        (swapLhsRhs ? rhsTensor : lhs), (swapLhsRhs ? lhs : rhsTensor));

    // There is no NE operator in TOSA, so not-equal is lowered as the logical
    // negation of the Equal result.
    if (std::is_same<AtenOpT, AtenNeTensorOp>() ||
        std::is_same<AtenOpT, AtenNeScalarOp>())
      rewriter.replaceOpWithNewOp<tosa::LogicalNotOp>(
          op,
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              op.getType()),
          resultOp.getResult());
    else
      rewriter.replaceOp(op, resultOp.getResult());

    return success();
  }
};

// Binary op legalizations for Mul variants.
template <typename AtenOpT>
class ConvertAtenMulOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value lhs = adaptor.self();
    auto lhsType = lhs.getType().dyn_cast<TensorType>();

    if (!lhsType)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    auto outType = OpConversionPattern<AtenOpT>::getTypeConverter()
                       ->convertType(op.getType())
                       .template cast<TensorType>();

    Type outElemTy = outType.getElementType();
    if (!outElemTy.isIntOrFloat())
      return rewriter.notifyMatchFailure(
          op, "Only floating-point or integer datatype legalization supported");

    Value rhsTensor;
    if (std::is_same<AtenOpT, AtenSquareOp>()) {
      rhsTensor = lhs;
    } else {
      Value rhsAsTensor;
      Value rhs = adaptor.other();
      auto rhsType = rhs.getType().dyn_cast<TensorType>();
      if (!rhsType) {
        if (failed(torchScalarToTosaTensor(rewriter, op, op.other(),
                                           rhsAsTensor, outElemTy, {}))) {
          return rewriter.notifyMatchFailure(
              op, "Currently only scalar constants are supported for "
                  "conversion in TOSA operation");
        }
      }
      rhsTensor = rhsType ? rhs : rhsAsTensor;
    }

    if (outElemTy.isa<mlir::FloatType>() ||
        outElemTy.isa<mlir::IntegerType>()) {
      if (lhsType.getElementType() != outElemTy)
        lhs = rewriter.create<tosa::CastOp>(op.getLoc(), outType, lhs);

      rewriter.replaceOpWithNewOp<tosa::MulOp>(
          op,
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              op.getType()),
          lhs, rhsTensor,
          /*shift=*/0);
      return success();
    }

    // Quantized multiplication may need to rescale inputs.
    return rewriter.notifyMatchFailure(
        op, "Only floating-point or integer datatype "
            "legalization currently supported");
  }
};

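// Binary op legalizations for aten.div variants (tensor or scalar divisor).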
template <typename AtenOpT>
class ConvertAtenDivOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value lhs = adaptor.self();
    auto lhsTy = lhs.getType().dyn_cast<TensorType>();
    Value rhs = adaptor.other();
    auto rhsTy = rhs.getType().dyn_cast<TensorType>();

    if (!lhsTy)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    auto lhsElemTy = lhsTy.getElementType();
    if (!lhsElemTy.isIntOrFloat())
      return rewriter.notifyMatchFailure(
          op, "Only floating-point or integer datatype legalization supported");

    Value rhsAsTensor;
    if (!rhsTy) {
      if (failed(torchScalarToTosaTensor(rewriter, op, op.other(), rhsAsTensor,
                                         lhsElemTy, {})))
        return rewriter.notifyMatchFailure(
            op, "Currently only scalar constants are supported for "
                "conversion in TOSA operation");
    }
    auto rhsTensor = rhsTy ? rhs : rhsAsTensor;

    if (lhsElemTy.isa<mlir::FloatType>()) {
      auto rcpOp = rewriter.create<tosa::ReciprocalOp>(
          op->getLoc(), rhsTy ? rhsTy : RankedTensorType::get({}, lhsElemTy),
          rhsTensor);
      rewriter.replaceOpWithNewOp<tosa::MulOp>(
          op,
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              op.getType()),
          lhs, rcpOp.getResult(), /*shift=*/0);
    } else {
      rewriter.replaceOpWithNewOp<tosa::DivOp>(
          op,
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              op.getType()),
          lhs, rhsTensor);
    }
    return success();
  }
};

// This defines a template to construct ops whose legalizations are
// specialized.
template <typename AtenOpT>
class ConvertAtenOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

template <>
LogicalResult ConvertAtenOp<AtenTanhOp>::matchAndRewrite(
    AtenTanhOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Value self = adaptor.self();
  auto selfTy = self.getType().cast<TensorType>();
  if (selfTy && selfTy.getElementType().isa<mlir::FloatType>()) {
    rewriter.replaceOpWithNewOp<tosa::TanhOp>(
        op, getTypeConverter()->convertType(op.getType()), self);
    return success();
  }
  // Tanh legalization in TOSA for quantized element-type uses a specialized
  // tosa.table construct.
  return rewriter.notifyMatchFailure(
      op, "Only floating-point datatype legalization currently supported");
}

template <>
LogicalResult ConvertAtenOp<AtenSigmoidOp>::matchAndRewrite(
    AtenSigmoidOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Value self = adaptor.self();
  auto selfTy = self.getType().cast<TensorType>();
  if (selfTy && selfTy.getElementType().isa<mlir::FloatType>()) {
    rewriter.replaceOpWithNewOp<tosa::SigmoidOp>(
        op, getTypeConverter()->convertType(op.getType()), self);
    return success();
  }
  // Sigmoid legalization in TOSA for quantized element-type uses
  // specialized tosa.table construct.
  return rewriter.notifyMatchFailure(
      op, "Only floating-point datatype legalization currently supported");
}

template <>
LogicalResult ConvertAtenOp<AtenReluOp>::matchAndRewrite(
    AtenReluOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Value self = adaptor.self();
  auto selfTy = self.getType().cast<TensorType>();

  // Maps to tosa.clamp which has both int and fp limits.
  int64_t clampMin = 0;
  Value clampIn = self;
  if (!selfTy) {
    return rewriter.notifyMatchFailure(op,
                                       "Only Tensor types supported in TOSA");
  }

  // Rescale the clampIn for quantized types. TBD
  if (!selfTy.getElementType().isa<mlir::FloatType>()) {
    return rewriter.notifyMatchFailure(
        op, "Only floating-point datatype legalization currently supported");
  }
  rewriter.replaceOpWithNewOp<tosa::ClampOp>(
      op, getTypeConverter()->convertType(op.getType()), clampIn,
      rewriter.getI64IntegerAttr(clampMin),
      rewriter.getI64IntegerAttr(std::numeric_limits<int32_t>::max()),
      rewriter.getF32FloatAttr(0.0f),
      rewriter.getF32FloatAttr(std::numeric_limits<float>::max()));
  return success();
}

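// Signature shared by the reduction conversion helpers defined in
// TosaLegalizeCommon.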
using ReductionConvFunc = llvm::Optional<Value> (*)(PatternRewriter &,
                                                    Operation *,
                                                    RankedTensorType, Value,
                                                    ElementsAttr, bool);

// All reduction op legalizations constitute a common form that invokes the
// appropriate conversion function in TosaLegalizeCommon.cpp
template <typename AtenOpT, ReductionConvFunc ConversionFuncT>
class ConvertAtenReductionOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;

  // Each variant must implement corresponding parameter parsing options
  virtual LogicalResult readReduceDimsAndKeepDims(
      AtenOpT op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter,
      ElementsAttr &reduceDimsAttr, bool &keepDims) const {
    return rewriter.notifyMatchFailure(
        op, "Unimplemented reduce_dims and keep_dims parsing function");
  }

  // Common rewriter for all reduction ops, calls the specific implementation
  // of readReduceDimsAndKeepDims() needed for the op variant.
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value self = adaptor.self();
    auto selfTy = self.getType().cast<TensorType>();

    if (!selfTy)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    auto outputTy = OpConversionPattern<AtenOpT>::getTypeConverter()
                        ->convertType(op.getType())
                        .template cast<RankedTensorType>();
    if (!outputTy)
      return rewriter.notifyMatchFailure(
          op, "Only ranked tensor type outputs permitted for reduce_mean");

    ElementsAttr reduceDimsAttr;
    bool keepDims;

    if (failed(readReduceDimsAndKeepDims(op, adaptor, rewriter, reduceDimsAttr,
                                         keepDims)))
      return failure();

    llvm::Optional<Value> result =
        ConversionFuncT(rewriter, op, outputTy, self, reduceDimsAttr, keepDims);

    if (!result)
      return failure();

    // TBD - support dtype casting.

    rewriter.replaceOp(op, {result.value()});

    return success();
  }
};

// This reduction op legalization template handles op variants that have
// explicit reduce_dims dimensions (provided as a list) and keep_dims
// parameters.
template <typename AtenOpT, ReductionConvFunc ConversionFuncT>
class ConvertAtenMultipleDimsReductionOp
    : public ConvertAtenReductionOp<AtenOpT, ConversionFuncT> {
  using ConvertAtenReductionOp<AtenOpT,
                               ConversionFuncT>::ConvertAtenReductionOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
                                          ConversionPatternRewriter &rewriter,
                                          ElementsAttr &reduceDimsAttr,
                                          bool &keepDims) const override {
    SmallVector<int64_t, 4> reduceDims;
    if (!matchPattern(op.dim(), m_TorchListOfConstantInts(reduceDims)))
      return rewriter.notifyMatchFailure(op,
                                         "non-const dim parameter unsupported");
    int64_t N = reduceDims.size();
    auto reduceDimsType = RankedTensorType::get({N}, rewriter.getI64Type());
    reduceDimsAttr = DenseIntElementsAttr::get(reduceDimsType,
                                               llvm::makeArrayRef(reduceDims));

    keepDims = false;
    if (!matchPattern(op.keepdim(), m_TorchConstantBool(&keepDims)))
      return rewriter.notifyMatchFailure(
          op, "non-const keepdim parameter unsupported");

    return success();
  }
};

// This reduction op legalization template handles op variants that reduce in
// only one explicit dim which is provided as a number (rather than a list),
// and a keep_dims parameter.
template <typename AtenOpT, ReductionConvFunc ConversionFuncT>
class ConvertAtenOneDimReductionOp
    : public ConvertAtenReductionOp<AtenOpT, ConversionFuncT> {
  using ConvertAtenReductionOp<AtenOpT,
                               ConversionFuncT>::ConvertAtenReductionOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
                                          ConversionPatternRewriter &rewriter,
                                          ElementsAttr &reduceDimsAttr,
                                          bool &keepDims) const override {
    int64_t reduceDim;
    if (!matchPattern(op.dim(), m_TorchConstantInt(&reduceDim)))
      return rewriter.notifyMatchFailure(op,
                                         "non-const dim parameter unsupported");
    auto reduceDimsType = RankedTensorType::get({1}, rewriter.getI64Type());
    reduceDimsAttr = DenseIntElementsAttr::get(reduceDimsType,
                                               llvm::makeArrayRef({reduceDim}));

    keepDims = false;
    if (!matchPattern(op.keepdim(), m_TorchConstantBool(&keepDims)))
      return rewriter.notifyMatchFailure(
          op, "non-const keepdim parameter unsupported");

    return success();
  }
};

// This reduction op legalization template handles op variants that reduce over
// all dims and do not keep dims.
template <typename AtenOpT, ReductionConvFunc ConversionFuncT>
class ConvertAtenAllDimsReductionOp
    : public ConvertAtenReductionOp<AtenOpT, ConversionFuncT> {
public:
  using ConvertAtenReductionOp<AtenOpT,
                               ConversionFuncT>::ConvertAtenReductionOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
                                          ConversionPatternRewriter &rewriter,
                                          ElementsAttr &reduceDimsAttr,
                                          bool &keepDims) const override {
    auto self = adaptor.self();
    auto selfTy = self.getType().template cast<RankedTensorType>();

    // Select all dims to reduce
    SmallVector<int64_t, 4> reduceDims;
    for (int64_t i = 0; i < selfTy.getRank(); i++)
      reduceDims.push_back(i);
    int64_t N = selfTy.getRank();
    auto reduceDimsType = RankedTensorType::get({N}, rewriter.getI64Type());
    reduceDimsAttr = DenseIntElementsAttr::get(reduceDimsType,
                                               llvm::makeArrayRef(reduceDims));
    keepDims = false;

    return success();
  }
};

template <>
LogicalResult ConvertAtenOp<AtenArgmaxOp>::matchAndRewrite(
    AtenArgmaxOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  Value self = adaptor.self();
  auto selfTy = self.getType().template cast<RankedTensorType>();

  if (!selfTy)
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types supported in TOSA argmax");

  int64_t reduceDim;
  if (!matchPattern(op.dim(), m_TorchConstantInt(&reduceDim))) {
    // NoneType indicates reduce on all dims
    reduceDim = -1;
  }

  bool keepDim = false;
  if (!matchPattern(op.keepdim(), m_TorchConstantBool(&keepDim)))
    return rewriter.notifyMatchFailure(
        op, "non-const keepdim parameter unsupported");

  auto resultTy = getTypeConverter()
                      ->convertType(op.getResult().getType())
                      .cast<RankedTensorType>();
  auto outputETy = resultTy.getElementType();

  // Create a single instance of tosa.argmax.
  // Multiple dims require chained construct.
  auto buildArgmax = [&](int64_t reduceDim, Value input) -> Value {
    auto inputTy = input.getType().cast<RankedTensorType>();
    auto inputShape = inputTy.getShape();
    SmallVector<int64_t> outputShapeArr = {};
    int32_t i = 0;

    for (auto &dim : inputShape) {
      if (i++ != reduceDim) {
        outputShapeArr.push_back(dim);
      } else {
        if (keepDim)
          outputShapeArr.push_back(1);
      }
    }

    // Tosa argmax output is i32, while Torch backend mandates i64.
    auto outputReduceTy = RankedTensorType::get(
        ArrayRef<int64_t>(outputShapeArr), rewriter.getI32Type());
    auto reduceDimAttr =
        rewriter.getIntegerAttr(rewriter.getI64Type(), reduceDim);
    return rewriter
        .create<tosa::ArgMaxOp>(op->getLoc(),
                                getTypeConverter()->convertType(outputReduceTy),
                                input, reduceDimAttr)
        .getResult();
  };

  // Convert the final index to i64 for backend finalization. However, i64
  // is not a defined type for tosa.cast, so using arith.extsi instead.
  auto castToInt64 = [&](Value result) -> LogicalResult {
    auto resTy = result.getType().cast<ShapedType>();
    if (!resTy)
      return rewriter.notifyMatchFailure(op,
                                         "Argmax: Result is not a shaped type");

    auto resShape = resTy.getShape();
    auto outTy = RankedTensorType::get(resShape, outputETy);

    rewriter.replaceOpWithNewOp<arith::ExtSIOp>(
        op, getTypeConverter()->convertType(outTy), result);

    return success();
  };

  if (reduceDim == -1) { // reducing on all dims
    Value input = self;
    for (int dim = 0; dim < selfTy.getRank(); dim++) {
      // progressively reduce each 0-th dim
      input = buildArgmax(0, input);
    }
    return castToInt64(input);
  } else {
    return castToInt64(buildArgmax(reduceDim, self));
  }

  return success();
}

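// Common base template for squeeze op legalizations; variants differ only in
// how generateSqueezedShape() computes the squeezed output shape.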
template <typename AtenOpT>
class ConvertAtenSqueezeOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;

  // Each variant must implement corresponding parameter parsing options
  virtual LogicalResult
  generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
                        ConversionPatternRewriter &rewriter,
                        SmallVector<int64_t> &squeezedShape) const {
    return rewriter.notifyMatchFailure(
        op, "Unimplemented dim/dim-list parsing function");
  }

  // Common rewriter for all squeeze ops, calls the specific implementation of
  // generateSqueezedShape() needed for the op variant.
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value self = adaptor.self();
    auto selfTy = self.getType().template cast<RankedTensorType>();

    if (!selfTy)
      return rewriter.notifyMatchFailure(
          op, "Only ranked tensor types supported in TOSA argmax");

    SmallVector<int64_t> newOutputShape;
    if (failed(generateSqueezedShape(op, selfTy, rewriter, newOutputShape)))
      return rewriter.notifyMatchFailure(op,
                                         "Squeeze could not compute new shape");

    auto resultTy = OpConversionPattern<AtenOpT>::getTypeConverter()
                        ->convertType(op.getResult().getType())
                        .template cast<RankedTensorType>();
    auto resultElemTy = resultTy.getElementType();

    auto newOutputTy = RankedTensorType::get(newOutputShape, resultElemTy);

    auto reshapeOp = rewriter.create<tosa::ReshapeOp>(
        op->getLoc(),
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            newOutputTy),
        self, rewriter.getI64ArrayAttr(newOutputShape));
    rewriter.replaceOpWithNewOp<tensor::CastOp>(
        op,
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            newOutputTy),
        reshapeOp);

    return success();
  }
};

template <typename AtenOpT>
class ConvertAtenSqueezeOneDimOp : public ConvertAtenSqueezeOp<AtenOpT> {
  using ConvertAtenSqueezeOp<AtenOpT>::ConvertAtenSqueezeOp;
  using OpAdaptor = typename AtenOpT::Adaptor;

  LogicalResult
  generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
                        ConversionPatternRewriter &rewriter,
                        SmallVector<int64_t> &squeezedShape) const override {
    int64_t squeezeDim;
    if (!matchPattern(op.dim(), m_TorchConstantInt(&squeezeDim)))
      return rewriter.notifyMatchFailure(op,
                                         "non-const dim parameter unsupported");

    // Handle negative dim
    if (squeezeDim < 0)
      squeezeDim = squeezeDim + selfTy.getRank();

    auto selfShape = selfTy.getShape();

    // Only dims statically known to have size=1 are reduced.
    // Dynamic dims are treated as unknowns and will not be squeezed
    // even if dim parameter says it should be.
    uint32_t dimNum = 0;
    for (auto &dim : selfShape) {
      if (dim != 1 || squeezeDim != dimNum)
        squeezedShape.push_back(dim);
      dimNum++;
    }

    return success();
  }
};

template <typename AtenOpT>
class ConvertAtenSqueezeAllDimsOp : public ConvertAtenSqueezeOp<AtenOpT> {
  using ConvertAtenSqueezeOp<AtenOpT>::ConvertAtenSqueezeOp;
  using OpAdaptor = typename AtenOpT::Adaptor;

  LogicalResult
  generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
                        ConversionPatternRewriter &rewriter,
                        SmallVector<int64_t> &squeezedShape) const override {
    auto selfShape = selfTy.getShape();

    // Dims that may dynamically resolve to 1 are not reduced here. Only
    // compile-time resolvable dims are handled here.
    for (auto &dim : selfShape) {
      if (dim != 1)
        squeezedShape.push_back(dim);
    }
    return success();
  }
};

template <>
LogicalResult ConvertAtenOp<AtenPowTensorScalarOp>::matchAndRewrite(
    AtenPowTensorScalarOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  Value self = adaptor.self();
  auto selfTy = self.getType().template cast<RankedTensorType>();

  if (!selfTy)
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types supported in TOSA Pow");

  if (!selfTy.getElementType().isa<mlir::FloatType>())
    return rewriter.notifyMatchFailure(
        op, "Only floating-point datatype legalization supported");

  Value expTensor;
  Value expScalar = op.exponent();
  if (failed(torchScalarToTosaTensor(rewriter, op, expScalar, expTensor,
                                     selfTy.getElementType(), {})))
    return rewriter.notifyMatchFailure(
        op, "Currently only scalar constants are supported for "
            "conversion in TOSA Pow operation");

  rewriter.replaceOpWithNewOp<tosa::PowOp>(
      op, getTypeConverter()->convertType(op.getType()), self, expTensor);

  return success();
}

// Perform the basic n-dim matmul operation encompassing the handling of
// broadcasting and dynamic shape propagation.
// All PyTorch ops that leverage matrix multiplication will derive this and
// implement their specialized input processing (e.g. transpose), and output
// processing, e.g. GEMM or fully connected bias handling.
template <typename AtenOpT>
class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  // Each variant must implement corresponding parameter parsing options.
  // Maintain separate input read functions for each variant because it is not
  // necessarily true with all variants that the first two operands are the lhs
  // and rhs.
  virtual LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
                                         ConversionPatternRewriter &rewriter,
                                         Value &lhs, Value &rhs) const {
    return rewriter.notifyMatchFailure(
        op,
        "Unimplemented matrix multiplication variant input parsing function");
  }
  LogicalResult performMatmul(AtenOpT op, OpAdaptor adaptor,
                              ConversionPatternRewriter &rewriter, Value &lhs,
                              Value &rhs, Value &output) const {

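    // The lowering below first rank-broadcasts both operands, then reshapes
    // (and, when batch dims are broadcast, transposes) each side into the 3-D
    // form that tosa.matmul expects.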
    auto lhsTy = lhs.getType().cast<RankedTensorType>();
    auto rhsTy = rhs.getType().cast<RankedTensorType>();

    auto lhsRank = lhsTy.getRank();
    auto rhsRank = rhsTy.getRank();

    auto lhsShape = lhsTy.getShape();
    auto rhsShape = rhsTy.getShape();

    auto lhsElemTy = lhsTy.getElementType();
    auto rhsElemTy = rhsTy.getElementType();

    if (lhsElemTy != rhsElemTy)
      return rewriter.notifyMatchFailure(op,
                                         "Matmul: input datatypes mismatched");

    // Legalization constructs may offer input shapes but expect output shapes
    // to be inferred, e.g.
    // func @forward(%arg0: !torch.vtensor<[14,19],f32>,
    //               %arg1: !torch.vtensor<[19,28],f32>) ->
    //               !torch.vtensor<[?,?],f32>
    // This is tricky with matmul, since TOSA matmul is on 3D inputs.
    // This means the need to reshape potentially both inputs and outputs,
    // and reshape to unknown shape is undefined.

    auto maxInputRank = lhsRank > rhsRank ? lhsRank : rhsRank;
    // If performing dot product on vectors, the RHS is synthetically transposed
    if (maxInputRank == 1)
      maxInputRank++;

    // Obtaining the rank broadcasted shapes of tensors makes it easier to
    // construct the input and output reshaping logic.
    auto getRankBroadcastedShape = [&](Value tensor,
                                       bool isRHS) -> SmallVector<int64_t> {
      auto tensorTy = tensor.getType().cast<TensorType>();
      auto tensorShape = tensorTy.getShape();
      auto tensorRank = tensorTy.getRank();

      SmallVector<int64_t> bcastedShape;

      auto bcastDims = maxInputRank - tensorRank;

      if (isRHS && (tensorRank == 1) && bcastDims) {
        // An RHS with rank 1 is special: it is synthetically transposed to
        // dim[:-2].
        for (int32_t i = 0; i < bcastDims - 1; i++)
          bcastedShape.push_back(1);
        bcastedShape.push_back(tensorShape[0]);
        bcastedShape.push_back(1);
      } else {
        if (bcastDims > 0) { // rank broadcast
          for (uint32_t i = 0; i < bcastDims; i++)
            bcastedShape.push_back(1);
        }
        for (auto &dim : tensorShape)
          bcastedShape.push_back(dim);
      }
      return bcastedShape;
    };

    // Step: Rank broadcast the two inputs.
    auto lhsBroadcastedShape = getRankBroadcastedShape(lhs, false);
    auto lhsBroadcastedTy =
        RankedTensorType::get(lhsBroadcastedShape, lhsElemTy);
    auto rhsBroadcastedShape = getRankBroadcastedShape(rhs, true);
    auto rhsBroadcastedTy =
        RankedTensorType::get(rhsBroadcastedShape, rhsElemTy);

    auto rankBroadcastedLhs =
        lhsRank == maxInputRank
            ? lhs
            : rewriter.create<tosa::ReshapeOp>(
                  op->getLoc(),
                  OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
                      lhsBroadcastedTy),
                  lhs, rewriter.getI64ArrayAttr(lhsBroadcastedShape));

    auto rankBroadcastedRhs =
        rhsRank == maxInputRank
            ? rhs
            : rewriter.create<tosa::ReshapeOp>(
                  op->getLoc(),
                  OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
                      rhsBroadcastedTy),
                  rhs, rewriter.getI64ArrayAttr(rhsBroadcastedShape));

    // TOSA matmul is performed on two 3D inputs and generates a 3D output.
    // Lower ranked tensors are dim-1 reshaped up to 3D
    auto reshapeUpTo3DTensor = [&](Value tensor) -> Value {
      auto tensorTy = tensor.getType().cast<TensorType>();
      auto rank = tensorTy.getRank();

      assert(rank <= 3 && "reshapeUpTo3D tensor must receive rank <= 3");
      if (rank == 3)
        return tensor;

      auto shape = tensorTy.getShape();
      SmallVector<int64_t> newShape({1, 1, 1});

      if (rank == 2) { // batchsize = 1
        newShape[1] = shape[0];
        newShape[2] = shape[1];
      } else { // rank 1
        newShape[2] = shape[0];
      }
      auto newType = RankedTensorType::get(newShape, tensorTy.getElementType());

      return rewriter.create<tosa::ReshapeOp>(
          op->getLoc(),
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              newType),
          tensor, rewriter.getI64ArrayAttr(newShape));
    };

    // Where broadcasting is required in one or more batch dims, the following
    // is done.
    // Where all batch dims are involved in broadcasting:
    // Given A: 3x1x5x6 and B: 1x4x6x7
    // 1. Reshape A to 1x15x6 (squeeze all batchdims into dim1)
    // 2. Transpose B to 6x1x4x7, Reshape to 1x6x28
    // 3. tosa.Matmul 1x15x6 1x6x28 = 1x15x28
    // 4. Reshape out to 3x5x4x7, Transpose to 3x4x5x7
    // Where only some batch dimensions are broadcast, the treatment is to have
    // dim0 correspond to the product of all non-broadcast dim sizes:
    // Given A: 4x8x16x32 B: 8x32x17
    // 1. Reshape A to 8x64x32 (squeeze all unbroadcasted dims into dim0,
    // broadcasted dims into dim1)
    // 2. No transpose or reshape of B as its batchdims are not broadcast to.
    // 3. tosa.Matmul 8x64x32 8x32x17 = 8x64x17
    // 4. Reshape to 8x4x16x17, Transpose to 4x8x16x17

    // Check if we need to perform the broadcast on batch dim
    // Not needed if max rank < 3, or if maxrank == 3 and dim[0] matches
    auto needsBatchDimBroadcast = [&]() -> bool {
      if (maxInputRank < 3) {
        return false;
      } else {
        if (maxInputRank == 3 &&
            lhsBroadcastedShape[0] == rhsBroadcastedShape[0]) {
          return false;
        }
        return true;
      }
    };

    auto performBatchDimBroadcast = needsBatchDimBroadcast();

    // Inputs to the tosa.matmul
    Value matmulLhs, matmulRhs;

    using TensorShape_t = struct {
      int64_t dim;
      int64_t shape;
    };

    // A transpose is needed if transposedDims is not monotonically increasing.
    // E.g. [0, 1, 2, 3]: no transpose; [1, 0, 2, 3]: transpose dim0 and dim1.
    // The order need not be sequential, since one or more dims may
    // have been removed due to broadcasting.
    auto isTransposeRequired = [](SmallVector<int32_t> transposedDims) -> bool {
      int32_t lastDim = -1;
      for (auto &dim : transposedDims) {
        if (lastDim > dim)
          return true;
        lastDim = dim;
      }
      return false;
    };

    SmallVector<TensorShape_t> commonElems, lhsSqueezedElems, rhsSqueezedElems;

    if (!performBatchDimBroadcast) {
      // Simple with no broadcasting artifacts. Just reshape up to 3D
      matmulLhs = reshapeUpTo3DTensor(rankBroadcastedLhs);
      matmulRhs = reshapeUpTo3DTensor(rankBroadcastedRhs);

    } else {
      // In this case, either or both input matrices involve broadcasting on
      // their batch dimensions. For example:
      // 4x5x6, 1x6x7 -> 4x5x7
      // 4x1x5x6, 1x3x6x7 -> 4x3x5x7
      // Though maxInputRank is necessarily >=3 here, individual matrices may be
      // lower rank.
      // E.g. 3x4x5x6, 6 -> 3x4x5

      // These are the accumulated products of the shape of each dim:
      // 1. common dimensions: upper dimensions (dims other than two rightmost)
      //    whose shapes are the same for both LHS and RHS.
      // 2. LHS squeezed dimensions: all dimensions of LHS that involve
      //    broadcasting in either direction, plus the LHS[-2] shape
      // 3. RHS squeezed dimensions: all dimensions of RHS that involve
      //    broadcasting in either direction, plus the RHS[-1] shape
      int64_t commonValue = 1, lhsSqueezedValue = 1, rhsSqueezedValue = 1;

      // For both LHS and RHS, the dimensions are separated into the common,
      // squeezed and remaining dim. E.g. given
      // LHS = 3x4x5x6
      // RHS = 1x4x6x7
      // common = {{dim=1, shape=4}}
      // lhs squeezed = {{dim=0, shape=3},
      //                 {dim=2, shape=5}}
      // rhs squeezed = {{dim=0, shape=1},
      //                 {dim=2, shape=7}}
      // The matmul dim is LHS[-1] and RHS[-2], i.e. 6.
      // Once this is obtained, LHS and RHS are expressed as:
      // LHS = {common, lhs_squeezed, matmul_dim}
      // RHS = {common, matmul_dim, rhs_squeezed}
      // The matmul is then performed to obtain output:
      // matmul_out = {common, lhs_squeezed, rhs_squeezed}
      // Finally, we reshape to 'unsqueeze' the LHS and RHS parts and transpose
      // them back to their correct positions.

      SmallVector<int64_t> transposedLhsShape;
      SmallVector<int32_t> transposedLhsDims;

      // Step: generate the common dim/shape information
      bool hasDynamicDims = false;
      for (uint32_t dim = 0; dim < maxInputRank - 2; dim++) {
        bool isDynamicDim = ShapedType::isDynamic(lhsBroadcastedShape[dim]);
        hasDynamicDims |= isDynamicDim;
        if (isDynamicDim ||
            lhsBroadcastedShape[dim] == rhsBroadcastedShape[dim]) {
          commonValue *= lhsBroadcastedShape[dim];
          commonElems.push_back({dim, lhsBroadcastedShape[dim]});
        }
      }

      // TODO: Handle the case when there are dynamic batch dimensions.
      if (hasDynamicDims)
        commonValue = ShapedType::kDynamicSize;

      // Step: generate the LHS squeezed dim/shape information.
      for (uint32_t dim = 0; dim < maxInputRank - 2; dim++) {
        bool isDynamicDim = ShapedType::isDynamic(lhsBroadcastedShape[dim]);
        if (!isDynamicDim &&
            lhsBroadcastedShape[dim] != rhsBroadcastedShape[dim]) {
          lhsSqueezedValue *= lhsBroadcastedShape[dim];
          lhsSqueezedElems.push_back({dim, lhsBroadcastedShape[dim]});
        }
      }
      // including LHS[-2]
      lhsSqueezedElems.push_back(
          {maxInputRank - 2, lhsBroadcastedShape[maxInputRank - 2]});
      lhsSqueezedValue *= lhsBroadcastedShape[maxInputRank - 2];

      // Step: Create the tosa.transpose array. If this array has a
      // non-monotonic series of dims, perform transpose.
      // First the common_elems
      for (uint32_t i = 0; i < commonElems.size(); i++) {
        transposedLhsShape.push_back(commonElems[i].shape);
        transposedLhsDims.push_back(commonElems[i].dim);
      }
      // then the lhs_squeezed elems
      for (uint32_t i = 0; i < lhsSqueezedElems.size(); i++) {
        transposedLhsShape.push_back(lhsSqueezedElems[i].shape);
        transposedLhsDims.push_back(lhsSqueezedElems[i].dim);
      }
      // then the final dim
      transposedLhsDims.push_back(maxInputRank - 1);
      transposedLhsShape.push_back(lhsBroadcastedShape[maxInputRank - 1]);

      bool lhsNeedsTranspose = isTransposeRequired(transposedLhsDims);

      auto lhsReshapeInput = rankBroadcastedLhs;

      if (lhsNeedsTranspose) {
        auto transposedLhsType =
            RankedTensorType::get(transposedLhsShape, rhsElemTy);

        llvm::Optional<Value> transposedLhsDimsConst =
            tosa::getConstTensor<int32_t>(
                rewriter, op,
                /*vec=*/transposedLhsDims,
                /*shape=*/{static_cast<int32_t>(transposedLhsDims.size())});

        lhsReshapeInput =
            rewriter
                .create<tosa::TransposeOp>(
                    op->getLoc(),
                    OpConversionPattern<AtenOpT>::getTypeConverter()
                        ->convertType(transposedLhsType),
                    rankBroadcastedLhs, transposedLhsDimsConst.value())
                .getResult();
      }

      // LHS = {common, lhs_squeezed, matmul_dim}
      SmallVector<int64_t> newLhsShape(
          {1, 1, lhsBroadcastedShape[maxInputRank - 1]});
      newLhsShape[0] = commonValue;
      newLhsShape[1] =
          hasDynamicDims ? ShapedType::kDynamicSize : lhsSqueezedValue;

      auto newLhsType = RankedTensorType::get(newLhsShape, lhsElemTy);

      matmulLhs = rewriter.create<tosa::ReshapeOp>(
          op->getLoc(),
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              newLhsType),
          lhsReshapeInput, rewriter.getI64ArrayAttr(newLhsShape));

      SmallVector<int64_t> transposedRhsShape;
      SmallVector<int32_t> transposedRhsDims;

      // Step: Create the RHS transpose sequence
      // RHS = {common, matmul_dim, rhs_squeezed}
      // first the common_dims
      for (uint32_t i = 0; i < commonElems.size(); i++) {
        transposedRhsShape.push_back(commonElems[i].shape);
        transposedRhsDims.push_back(commonElems[i].dim);
      }
      // The matmul_dim of RHS
      transposedRhsDims.push_back(maxInputRank - 2);
      transposedRhsShape.push_back(rhsBroadcastedShape[maxInputRank - 2]);
      // finally all the rhs_squeeze dims
      hasDynamicDims = false;
      for (uint32_t dim = 0; dim < maxInputRank - 2; dim++) {
        bool isDynamicDim = ShapedType::isDynamic(rhsBroadcastedShape[dim]);
        hasDynamicDims |= isDynamicDim;
        if (!isDynamicDim &&
            rhsBroadcastedShape[dim] != lhsBroadcastedShape[dim]) {
          rhsSqueezedElems.push_back({dim, rhsBroadcastedShape[dim]});
          rhsSqueezedValue *= rhsBroadcastedShape[dim];
        }
      }
      rhsSqueezedElems.push_back(
          {maxInputRank - 1, rhsBroadcastedShape[maxInputRank - 1]});
      rhsSqueezedValue *= rhsBroadcastedShape[maxInputRank - 1];
      for (uint32_t i = 0; i < rhsSqueezedElems.size(); i++) {
        transposedRhsShape.push_back(rhsSqueezedElems[i].shape);
        transposedRhsDims.push_back(rhsSqueezedElems[i].dim);
      }

      auto transposedRhsType =
          RankedTensorType::get(transposedRhsShape, rhsElemTy);

      if (hasDynamicDims)
        rhsSqueezedValue = ShapedType::kDynamicSize;

      SmallVector<int64_t> newRhsShape({commonValue,
                                        rhsBroadcastedShape[maxInputRank - 2],
                                        rhsSqueezedValue});
      auto newRhsType = RankedTensorType::get(newRhsShape, rhsElemTy);

      bool rhsNeedsTranspose = isTransposeRequired(transposedRhsDims);

      auto transposedRhsValue = rankBroadcastedRhs;

      if (rhsNeedsTranspose) {
        llvm::Optional<Value> transposedRhsDimsConst =
            tosa::getConstTensor<int32_t>(
                rewriter, op,
                /*vec=*/transposedRhsDims,
                /*shape=*/{static_cast<int32_t>(transposedRhsDims.size())});

        transposedRhsValue =
            rewriter
                .create<tosa::TransposeOp>(
                    op->getLoc(),
                    OpConversionPattern<AtenOpT>::getTypeConverter()
                        ->convertType(transposedRhsType),
                    rankBroadcastedRhs, transposedRhsDimsConst.value())
                .getResult();
      }

      // reshape
      matmulRhs = rewriter.create<tosa::ReshapeOp>(
          op->getLoc(),
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              newRhsType),
          transposedRhsValue, rewriter.getI64ArrayAttr(newRhsShape));
    }

auto matmulLhsShape =
|
|
|
|
matmulLhs.getType().template cast<RankedTensorType>().getShape();
|
|
|
|
auto matmulRhsShape =
|
|
|
|
matmulRhs.getType().template cast<RankedTensorType>().getShape();
|
|
|
|
|
|
|
|

    // The reshape/transpose should ensure the tosa.matmul always has same
    // batch size for either matrix. If shapes are dynamic, they'll be
    // appropriately handled.
    assert(matmulLhsShape[0] == matmulRhsShape[0] &&
           "tosa.matmul needs same batchsize on LHS and RHS");

    SmallVector<int64_t> matmulOutputShape(
        {matmulLhsShape[0], matmulLhsShape[1], matmulRhsShape[2]});
    Type outputElemTy;
    if (lhsElemTy.isa<mlir::FloatType>()) {
      outputElemTy = lhsElemTy;
    } else { // qint8 emits i32 matmul output
      outputElemTy = rewriter.getIntegerType(32);
    }

    auto mmOutputTy = RankedTensorType::get(matmulOutputShape, outputElemTy);
    auto mmOpResult =
        rewriter
            .create<tosa::MatMulOp>(
                op->getLoc(),
                OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
                    mmOutputTy),
                matmulLhs, matmulRhs)
            .getResult();

    // Perform the reshape to output shape. This is always required unless max
    // input rank=3 and there was no broadcasting, in which case the
    // tosa.matmul output itself is correctly shaped.
    bool performOpReshape = !(maxInputRank == 3 && !performBatchDimBroadcast);

    if (performOpReshape) {
      // Since the output shape may be unknown, we construct it
      // independently and reshape. Otherwise reshape may be expressed for
      // an unknown to-be-inferred output shape. The final tensor.cast
      // reshapes the known shape to the desired output shape.
      auto computeOpShape = [&](SmallVector<int64_t> &reshapedOpShape,
                                SmallVector<int32_t> &transposedOpDims,
                                SmallVector<int64_t> &transposedOpShapes) {
        if (maxInputRank == 1)
          return;

        if (maxInputRank == 2) {
          if (lhsRank == 2)
            reshapedOpShape.push_back(lhsShape[0]);
          if (rhsRank == 2)
            reshapedOpShape.push_back(rhsShape[1]);
          return;
        }

        // Step: Construct the output transpose/reshape information
        // First the common_dims
        for (uint32_t i = 0; i < commonElems.size(); i++) {
          reshapedOpShape.push_back(commonElems[i].shape);
          transposedOpDims.push_back(commonElems[i].dim);
        }

        // Then the LHS squeezed dims
        for (uint32_t i = 0; i < lhsSqueezedElems.size() - 1; i++) {
          // Only dims that don't broadcast - broadcasting ones come from the
          // other input.
          if (lhsSqueezedElems[i].shape != 1) {
            reshapedOpShape.push_back(lhsSqueezedElems[i].shape);
            transposedOpDims.push_back(lhsSqueezedElems[i].dim);
          }
        }
        // The last squeezed dim is lhs[-2] which needs to be
        // checked separately for broadcasting
        if (lhsRank > 1) {
          reshapedOpShape.push_back(lhsBroadcastedShape[maxInputRank - 2]);
          transposedOpDims.push_back(maxInputRank - 2);
        }

        // then the RHS squeezed dims except rhs[-1] which is handled like
        // lhs[-2]
        for (uint32_t i = 0; i < rhsSqueezedElems.size() - 1; i++) {
          if (rhsSqueezedElems[i].shape != 1) {
            reshapedOpShape.push_back(rhsSqueezedElems[i].shape);
            transposedOpDims.push_back(rhsSqueezedElems[i].dim);
          }
        }
        // rhs[-1]
        if (rhsRank > 1) {
          reshapedOpShape.push_back(rhsBroadcastedShape[maxInputRank - 1]);
          transposedOpDims.push_back(maxInputRank - 1);
        }

        // Final transposed output shape construction
        for (uint32_t i = 0; i < maxInputRank - 2; i++) {
          if (lhsBroadcastedTy.isDynamicDim(i)) {
            transposedOpShapes.push_back(ShapedType::kDynamicSize);
          } else {
            if (lhsBroadcastedShape[i] == rhsBroadcastedShape[i]) {
              transposedOpShapes.push_back(lhsBroadcastedShape[i]);
            } else {
              transposedOpShapes.push_back(lhsBroadcastedShape[i] == 1
                                               ? rhsBroadcastedShape[i]
                                               : lhsBroadcastedShape[i]);
            }
          }
        }
        if (lhsRank > 1)
          transposedOpShapes.push_back(lhsBroadcastedShape[maxInputRank - 2]);
        if (rhsRank > 1)
          transposedOpShapes.push_back(rhsBroadcastedShape[maxInputRank - 1]);

        return;
      };

      SmallVector<int64_t> reshapedOpShape, transposedOpShape;
      SmallVector<int32_t> transposedOpDims;

      computeOpShape(reshapedOpShape, transposedOpDims, transposedOpShape);

      bool opNeedsTranspose = isTransposeRequired(transposedOpDims);

      // Perform reshape
      auto reshapedOpType =
          RankedTensorType::get(reshapedOpShape, outputElemTy);
      auto reshapedOp = rewriter.create<tosa::ReshapeOp>(
          op->getLoc(),
          OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
              reshapedOpType),
          mmOpResult, rewriter.getI64ArrayAttr(reshapedOpShape));

      if (opNeedsTranspose) {
        llvm::Optional<Value> transposedOpShapeConst =
            tosa::getConstTensor<int32_t>(
                rewriter, op,
                /*vec=*/transposedOpDims,
                /*shape=*/{static_cast<int32_t>(transposedOpDims.size())});

        auto transposedOpType =
            RankedTensorType::get(transposedOpShape, outputElemTy);
        output = rewriter
                     .create<tosa::TransposeOp>(
                         op->getLoc(),
                         OpConversionPattern<AtenOpT>::getTypeConverter()
                             ->convertType(transposedOpType),
                         reshapedOp.getResult(), transposedOpShapeConst.value())
                     .getResult();
      } else {
        output = reshapedOp.getResult();
      }
    } else {
      output = mmOpResult;
    }

    return success();
  }

  // The default version just reads two inputs, computes output and returns it.
  // Other versions may add a bias, apply GEMM-style alpha/beta scaling etc.
  virtual LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {

    Value lhs, rhs;

    if (failed(readMatMulInputs(op, adaptor, rewriter, lhs, rhs)))
      return rewriter.notifyMatchFailure(op, "Failed to read matmul inputs");

    Value output;

    if (failed(performMatmul(op, adaptor, rewriter, lhs, rhs, output)))
      return rewriter.notifyMatchFailure(op,
                                         "Failed to perform matmul operation");

    rewriter.replaceOpWithNewOp<tensor::CastOp>(
        op,
        OpConversionPattern<AtenOpT>::getTypeConverter()
            ->convertType(op.getType())
            .template cast<RankedTensorType>(),
        output);

    return success();
  }
};

// Legalizes the torch.matmul op for general n-dim matmul.
template <typename AtenOpT>
class ConvertAtenMatMulOp : public ConvertAtenMatmulBaseOp<AtenOpT> {
public:
  using ConvertAtenMatmulBaseOp<AtenOpT>::ConvertAtenMatmulBaseOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
                                 ConversionPatternRewriter &rewriter,
                                 Value &lhs, Value &rhs) const override {
    lhs = adaptor.self();
    auto lhsTy = lhs.getType().cast<RankedTensorType>();

    rhs = adaptor.other();
    auto rhsTy = rhs.getType().cast<RankedTensorType>();

    if (!lhsTy || !rhsTy)
      return rewriter.notifyMatchFailure(
          op, "Only ranked tensor types supported in TOSA matmul");

    return success();
  }
};

// Implements handling of aten.mm and aten.bmm ops.
template <typename AtenOpT>
class ConvertAtenMmOp : public ConvertAtenMatmulBaseOp<AtenOpT> {
public:
  using ConvertAtenMatmulBaseOp<AtenOpT>::ConvertAtenMatmulBaseOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
                                 ConversionPatternRewriter &rewriter,
                                 Value &lhs, Value &rhs) const override {

    lhs = adaptor.self();
    auto lhsTy = lhs.getType().cast<RankedTensorType>();

    rhs = adaptor.mat2();
    auto rhsTy = rhs.getType().cast<RankedTensorType>();

    if (!lhsTy || !rhsTy)
      return rewriter.notifyMatchFailure(
          op, "Only ranked tensor types supported in TOSA matmul");

    auto lhsRank = lhsTy.getRank();
    auto rhsRank = rhsTy.getRank();

    if (isa<AtenMmOp>(op)) {
      // Mm takes two 2D tensors.
      if (lhsRank != 2 || rhsRank != 2)
        return op.emitError("aten.mm called but matrix rank != 2");
    } else if (isa<AtenBmmOp>(op)) {
      // Bmm takes two 3D tensors.
      if (lhsRank != 3 || rhsRank != 3)
        return op.emitError("aten.bmm called but matrix rank != 3");
    }

    return success();
  }
};

// Implements handling of aten.linear op.
template <typename AtenOpT>
class ConvertAtenLinearOp : public ConvertAtenMatmulBaseOp<AtenOpT> {
public:
  using ConvertAtenMatmulBaseOp<AtenOpT>::ConvertAtenMatmulBaseOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult readMatMulInputs(AtenOpT op, OpAdaptor adaptor,
                                 ConversionPatternRewriter &rewriter,
                                 Value &lhs, Value &rhs) const override {

    lhs = adaptor.input();
    auto lhsTy = lhs.getType().cast<RankedTensorType>();

    rhs = adaptor.weight();
    auto rhsTy = rhs.getType().cast<RankedTensorType>();

    if (!lhsTy || !rhsTy)
      return rewriter.notifyMatchFailure(
          op, "Only ranked tensor types supported in TOSA matmul");

    auto lhsRank = lhsTy.getRank();
    auto rhsRank = rhsTy.getRank();

    if (lhsRank != 2 && lhsRank != 3)
      return op.emitError("aten.Linear called but input rank not 2 or 3");
    if (rhsRank != 2 && rhsRank != 3)
      return op.emitError("aten.Linear called but weight rank not 2 or 3");

    // Protection against crash due to unguarded code in TOSA->LinAlg.
    // TODO: This should be handled in TOSA->LinAlg instead.
    if (!lhsTy.hasStaticShape() || !rhsTy.hasStaticShape())
      return rewriter.notifyMatchFailure(
          op, "aten.Linear needs statically shaped input");

    return success();
  }
  // Override the default rewriter to perform RHS transpose and bias addition
  // as well.
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {

    Value lhs, rhs;

    if (failed(readMatMulInputs(op, adaptor, rewriter, lhs, rhs)))
      return rewriter.notifyMatchFailure(op, "Failed to read matmul inputs");

    // The aten.Linear op has a bias tensor that is added to the matmul output.
    auto bias = adaptor.bias();
    auto biasTy = bias.getType();

    // TOSA does not mandate that elementwise op tensors need to be ranked.
    if (!biasTy.template isa<Torch::NoneType>() &&
        !biasTy.template isa<TensorType>())
      return rewriter.notifyMatchFailure(
          op, "Only tensor types supported in GEMM to TOSA for bias tensor");

    // RHS must have its last two dims transposed prior to matrix
    // multiplication.
    auto rhsTy = rhs.getType().cast<RankedTensorType>();
    auto rhsRank = rhsTy.getRank();
    auto rhsShape = rhsTy.getShape();
    auto rhsElemTy = rhsTy.getElementType();

    // Create a non-const shape array to transpose dims.
    SmallVector<int64_t> transposedRhsShape;
    for (auto &shape : rhsShape)
      transposedRhsShape.push_back(shape);
    SmallVector<int32_t> transposedRhsDims;
    for (int32_t i = 0; i < rhsRank; i++)
      transposedRhsDims.push_back(i);

    // Swap the last two dims.
    std::swap(transposedRhsShape[rhsRank - 1], transposedRhsShape[rhsRank - 2]);
    std::swap(transposedRhsDims[rhsRank - 1], transposedRhsDims[rhsRank - 2]);
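
    // Illustrative note (not from the original source): a weight of shape
    // {out_features, in_features} becomes {in_features, out_features} here,
    // so the subsequent matmul effectively computes input x weight^T as
    // aten.linear requires.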

    llvm::Optional<Value> transposedRhsShapeConst =
        tosa::getConstTensor<int32_t>(
            rewriter, op,
            /*vec=*/transposedRhsDims,
            /*shape=*/{static_cast<int32_t>(transposedRhsDims.size())});

    auto transposedRhsType =
        RankedTensorType::get(transposedRhsShape, rhsElemTy);
    rhs = rewriter.create<tosa::TransposeOp>(
        op->getLoc(),
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            transposedRhsType),
        rhs, transposedRhsShapeConst.value());

    Value matmulOutput;
    if (failed(
            this->performMatmul(op, adaptor, rewriter, lhs, rhs, matmulOutput)))
      return rewriter.notifyMatchFailure(op,
                                         "Failed to perform matmul operation");

    Value matmulPlusBias = matmulOutput;
    if (!biasTy.template isa<Torch::NoneType>()) {
      // Bias addition broadcasts to the matmul output shape.
      matmulPlusBias =
          rewriter
              .create<tosa::AddOp>(op->getLoc(), matmulOutput.getType(),
                                   matmulOutput, bias)
              .getResult();
    }

    rewriter.replaceOpWithNewOp<tensor::CastOp>(
        op,
        OpConversionPattern<AtenOpT>::getTypeConverter()
            ->convertType(op.getType())
            .template cast<RankedTensorType>(),
        matmulPlusBias);

    return success();
  }
};

template <>
LogicalResult ConvertAtenOp<AtenRsubScalarOp>::matchAndRewrite(
    AtenRsubScalarOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto self = adaptor.self();
  auto otherScalar = op.other();
  auto alphaScalar = op.alpha();

  auto selfTy = self.getType().template cast<RankedTensorType>();
  if (!selfTy)
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types supported in TOSA Rsub");

  if (!selfTy.getElementType().isa<mlir::FloatType>())
    return rewriter.notifyMatchFailure(
        op, "Only floating-point datatype legalization supported");

  Value otherTensor, alphaTensor;

  if (failed(torchScalarToTosaTensor(rewriter, op, otherScalar, otherTensor,
                                     selfTy.getElementType(), {})))
    return rewriter.notifyMatchFailure(
        op, "Currently only scalar constants are supported for "
            "conversion in TOSA Rsub operation");

  if (failed(torchAlphaToTosaTensor(rewriter, op.getOperation(), alphaScalar,
                                    alphaTensor, selfTy.getElementType(),
                                    /*checkForUnity=*/true)))
    return failure();
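
  // aten.rsub.Scalar computes other - alpha * self, so form alpha * self with
  // a mul and subtract it from the scalar-derived tensor below.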
  auto multTensor = rewriter.create<tosa::MulOp>(
      op->getLoc(), getTypeConverter()->convertType(op.getType()), self,
      alphaTensor, /*shift=*/0);

  rewriter.replaceOpWithNewOp<tosa::SubOp>(
      op, getTypeConverter()->convertType(op.getType()), otherTensor,
      multTensor);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenConvolutionOp>::matchAndRewrite(
    AtenConvolutionOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto input = adaptor.input();
  auto weight = adaptor.weight();

  auto inputTy = input.getType().template cast<RankedTensorType>();
  auto weightTy = weight.getType().template cast<RankedTensorType>();
  auto outputTy = getTypeConverter()
                      ->convertType(op.getType())
                      .template cast<RankedTensorType>();

  if (!inputTy || !weightTy || !outputTy)
    return rewriter.notifyMatchFailure(
        op, "Input, weight and output to Convolution must be ranked tensors");

  auto inputElemTy = inputTy.getElementType();
  auto weightElemTy = weightTy.getElementType();
  auto inputShape = inputTy.getShape();
  auto weightShape = weightTy.getShape();

  if (inputTy.getRank() != 4)
    return rewriter.notifyMatchFailure(
        op, "Unimplemented: only 2D convolutions supported");

  if (!weightTy.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op, "Unimplemented: TOSA only supports static weight");

  // Bias is optional. TOSA mandates a zero tensor here, so construct one if
  // required.
  auto bias = adaptor.bias();
  if (adaptor.bias().getType().template isa<Torch::NoneType>()) {
    // TBD: This is only valid for quantized 8-bit. For 16-bit, the bias (and
    // accumulator) are 48-bit and not 32-bit, and requires the use of APInt to
    // define a 48-bit int.
    if (inputElemTy.isa<quant::QuantizedType>()) {
      SmallVector<int32_t> zeroVec(weightShape[0], 0);
      bias = tosa::getConstTensor<int32_t>(
                 rewriter, op, zeroVec, {static_cast<int32_t>(weightShape[0])})
                 .value();
    } else {
      SmallVector<float> zeroVec(weightShape[0], 0);
      bias = tosa::getConstTensor<float>(rewriter, op, zeroVec,
                                         {static_cast<int32_t>(weightShape[0])})
                 .value();
    }
  } else {
    if (!bias.getType().cast<RankedTensorType>())
      return rewriter.notifyMatchFailure(
          op, "Bias provided but not a ranked tensor");
  }
  auto biasElemTy = inputElemTy.template isa<mlir::FloatType>()
                        ? inputElemTy
                        : rewriter.getI32Type();

  SmallVector<int64_t, 2> stride;
  if (!matchPattern(adaptor.stride(), m_TorchListOfConstantInts(stride)))
    return rewriter.notifyMatchFailure(op, "non-const stride list unsupported");

  SmallVector<int64_t, 2> padding_2d;
  if (!matchPattern(adaptor.padding(), m_TorchListOfConstantInts(padding_2d)))
    return rewriter.notifyMatchFailure(op,
                                       "non-const padding list unsupported");
  // TOSA uses 4D padding {t, b, l, r} while Torch defines 2D padding {t, l}.
  // The Torch OFM computation uses 2*pad in each spatial direction, implying
  // the same t=b and l=r values for TOSA.
  SmallVector<int64_t> padding(
      {padding_2d[0], padding_2d[0], padding_2d[1], padding_2d[1]});
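  // Illustrative example (not from the original source): a Torch padding list
  // of {1, 2} expands to the TOSA padding {1, 1, 2, 2}, i.e. one row of
  // padding on top and bottom and two columns on left and right.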

  SmallVector<int64_t, 2> dilation;
  if (!matchPattern(adaptor.dilation(), m_TorchListOfConstantInts(dilation)))
    return rewriter.notifyMatchFailure(op,
                                       "non-const dilation list unsupported");

  // TOSA works in NHWC and takes OHWI weights. Perform the necessary
  // transpose.
  llvm::Optional<Value> nchwToNhwcTransposeConst =
      tosa::getConstTensor<int32_t>(rewriter, op,
                                    /*vec=*/{0, 2, 3, 1},
                                    /*shape=*/{static_cast<int32_t>(4)});
  SmallVector<int64_t> transposedInputShape(
      {inputShape[0], inputShape[2], inputShape[3], inputShape[1]});
  auto transposedInputType =
      RankedTensorType::get(transposedInputShape, inputElemTy);
  auto transposedInput =
      rewriter
          .create<tosa::TransposeOp>(
              op->getLoc(),
              getTypeConverter()->convertType(transposedInputType), input,
              nchwToNhwcTransposeConst.value())
          .getResult();

  SmallVector<int64_t> transposedWeightShape(
      {weightShape[0], weightShape[2], weightShape[3], weightShape[1]});
  auto transposedWeightType =
      RankedTensorType::get(transposedWeightShape, weightElemTy);
  auto transposedWeight =
      rewriter
          .create<tosa::TransposeOp>(
              op->getLoc(),
              getTypeConverter()->convertType(transposedWeightType), weight,
              nchwToNhwcTransposeConst.value())
          .getResult();

  int64_t outputHDim, outputWDim;
  if (inputTy.hasStaticShape()) {
    outputHDim = (transposedInputShape[1] + padding[0] + padding[1] -
                  dilation[0] * (transposedWeightShape[1] - 1) - 1) /
                     stride[0] +
                 1;
    outputWDim = (transposedInputShape[2] + padding[2] + padding[3] -
                  dilation[1] * (transposedWeightShape[2] - 1) - 1) /
                     stride[1] +
                 1;
  } else {
    outputHDim = ShapedType::kDynamicSize;
    outputWDim = ShapedType::kDynamicSize;
  }
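
  // Worked example (illustrative values, not from the original source): for
  // an input height of 8 with padding {1, 1}, kernel height 3, dilation 1 and
  // stride 2, outputHDim = (8 + 1 + 1 - 1 * (3 - 1) - 1) / 2 + 1
  //                      = 7 / 2 + 1 = 4.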

  // Output shape is NHWC, to be transposed back to NCHW. Output elemTy for
  // quantized input is i32, which gets rescaled down to quantized output
  // range.
  SmallVector<int64_t> outputShape = {transposedInputShape[0], outputHDim,
                                      outputWDim, transposedWeightShape[0]};
  auto convOpTy = RankedTensorType::get(outputShape, biasElemTy);

  Value convOpResult =
      rewriter
          .create<tosa::Conv2DOp>(op->getLoc(),
                                  getTypeConverter()->convertType(convOpTy),
                                  transposedInput, transposedWeight, bias,
                                  rewriter.getI64ArrayAttr(padding),
                                  rewriter.getI64ArrayAttr(stride),
                                  rewriter.getI64ArrayAttr(dilation))
          .getResult();

  llvm::Optional<Value> nhwcToNchwTransposeConst =
      tosa::getConstTensor<int32_t>(rewriter, op,
                                    /*vec=*/{0, 3, 1, 2},
                                    /*shape=*/{static_cast<int32_t>(4)});
  SmallVector<int64_t> transposedOutputShape(
      {outputShape[0], outputShape[3], outputShape[1], outputShape[2]});
  auto transposedOutputType =
      RankedTensorType::get(transposedOutputShape, biasElemTy);
  auto transposedOutput =
      rewriter
          .create<tosa::TransposeOp>(
              op->getLoc(),
              getTypeConverter()->convertType(transposedOutputType),
              convOpResult, nhwcToNchwTransposeConst.value())
          .getResult();

  Value rescaledResult = transposedOutput;
  if (inputElemTy.template isa<quant::QuantizedType>()) {
    rescaledResult = tosa::buildRescaleOpConvOutput(
        rewriter, op, transposedOutput, inputTy, weightTy, outputTy);
  }

  rewriter.replaceOpWithNewOp<tensor::CastOp>(
      op, getTypeConverter()->convertType(op.getType()), rescaledResult);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenReshapeOp>::matchAndRewrite(
    AtenReshapeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto self = adaptor.self();

  auto selfTy = self.getType().template cast<RankedTensorType>();
  if (!selfTy)
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types supported in TOSA Reshape");

  // Check that at most one dimension is -1
  SmallVector<int64_t> newShape;
  if (!matchPattern(op.shape(), m_TorchListOfConstantInts(newShape)))
    return rewriter.notifyMatchFailure(
        op, "Only constant shape supported in TOSA Reshape");

  int auto_sz = 0;
  for (auto s : newShape)
    auto_sz += (s == -1 ? 1 : 0);
  if (auto_sz > 1)
    return rewriter.notifyMatchFailure(
        op, "At most one dimension may be specified as -1 to "
            "automatically calculate its size");

  auto newType = RankedTensorType::get(newShape, selfTy.getElementType());

  rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
      op, getTypeConverter()->convertType(newType), self,
      rewriter.getI64ArrayAttr(newShape));

  return success();
}

Value computeBatchNorm(Operation *op, ConversionPatternRewriter &rewriter,
                       Type outType, Value input, Value variance, Value eps,
                       Value mean, Value weight, Value bias) {
  // For PyTorch:
  //   scale  = gamma = weight
  //   offset = beta  = bias
  // Lowering:
  // fused batchnorm = (input - mean) * scale * rsqrt(var + epsilon) + offset
  //
  // shape_0 = ones(input.rank)
  // shape_0[input.rank-1] = input.shape[input.rank-1]
  // shape_1 = ones(1)
  //
  // bmean  = reshape(mean, shape_0)
  // bscale = reshape(scale, shape_0)
  // boffset= reshape(offset, shape_0)
  // beps   = reshape(epsilon, shape_1)
  //
  // op1 = sub(input, bmean)
  // op2 = add(var, beps)
  // op3 = rsqrt(op2)
  // bvar = reshape(op3, shape_0)
  // op4 = mul(op1, bvar)
  // op5 = mul(op4, bscale)
  // op6 = add(op5, boffset)

  auto op1SubInputMean =
      rewriter.create<tosa::SubOp>(op->getLoc(), outType, input, mean);

  auto op2AddVarEpsilon = rewriter.create<tosa::AddOp>(
      op->getLoc(), variance.getType(), variance, eps);

  auto op3RsqrtOp2 = rewriter.create<tosa::RsqrtOp>(
      op->getLoc(), variance.getType(), op2AddVarEpsilon.getResult());

  auto op4MulOp1Op3 = rewriter.create<tosa::MulOp>(op->getLoc(), outType,
                                                   op1SubInputMean.getResult(),
                                                   op3RsqrtOp2.getResult(), 0);

  auto op5MulOp4Scale = rewriter.create<tosa::MulOp>(
      op->getLoc(), outType, op4MulOp1Op3.getResult(), weight, 0);

  return rewriter
      .create<tosa::AddOp>(op->getLoc(), outType, op5MulOp4Scale.getResult(),
                           bias)
      .getResult();
}

// This lowering is based on the TensorFlow to TOSA lowering.
template <>
LogicalResult ConvertAtenOp<AtenBatchNormOp>::matchAndRewrite(
    AtenBatchNormOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a ranked tensor output
  if (!adaptor.input().getType().dyn_cast<RankedTensorType>())
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types are supported");

  auto outType = getTypeConverter()->convertType(op.getType());

  // Note: cudnn_enabled is not handled.

  // FIXME: Handle training and momentum.
  if (op.momentum().getType().isa<Torch::NoneType>())
    return rewriter.notifyMatchFailure(op, "Unsupported None for momentum");

  auto meanType = adaptor.running_mean().getType().dyn_cast<TensorType>();
  auto varianceType = adaptor.running_var().getType().dyn_cast<TensorType>();
  if (!varianceType || !meanType)
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types are supported");

  // Normalization ops perform elementwise ops of a single mean/stdev value
  // against the feature map and because input is NCHW, the rank-1 value must
  // be reshaped so it sits on the same dim as 'C'.
  auto reshapeToNormInputDim = [&](Operation *op,
                                   ConversionPatternRewriter &rewriter,
                                   TypeConverter *converter, Type outType,
                                   const Value toBcast, Value &result) {
    RankedTensorType toBcastType =
        toBcast.getType().dyn_cast<RankedTensorType>();
    if (toBcastType.getRank() > 1)
      return rewriter.notifyMatchFailure(op, "Rank cannot be more than 1");

    RankedTensorType outTensorType = outType.cast<RankedTensorType>();
    SmallVector<int64_t> newShape = {toBcastType.getShape()[0]};
    for (auto i = 2; i < outTensorType.getRank(); ++i)
      newShape.push_back(1);
    auto newType =
        RankedTensorType::get(newShape, outTensorType.getElementType());

    result = rewriter.create<tosa::ReshapeOp>(
        op->getLoc(), newType, toBcast, rewriter.getI64ArrayAttr(newShape));

    return success();
  };
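
  // Illustrative example (not from the original source): for an NCHW output
  // of rank 4, a running_mean of shape {C} is reshaped by the lambda above to
  // {C, 1, 1}, so elementwise ops broadcast it across H and W while matching
  // the channel dim.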

  Value meanVal, varianceVal, weightVal, biasVal;
  assert(meanType.getNumElements() != 0 && varianceType.getNumElements() != 0);
  if (failed(reshapeToNormInputDim(op.getOperation(), rewriter,
                                   getTypeConverter(), outType,
                                   adaptor.running_mean(), meanVal)))
    return rewriter.notifyMatchFailure(op, "Failed to reshape running mean");

  if (failed(reshapeToNormInputDim(op.getOperation(), rewriter,
                                   getTypeConverter(), outType,
                                   adaptor.running_var(), varianceVal)))
    return rewriter.notifyMatchFailure(op,
                                       "Failed to reshape running variance");

  if (failed(reshapeToNormInputDim(op.getOperation(), rewriter,
                                   getTypeConverter(), outType,
                                   adaptor.weight(), weightVal)))
    return rewriter.notifyMatchFailure(op, "Failed to reshape weight");

  if (failed(reshapeToNormInputDim(op.getOperation(), rewriter,
                                   getTypeConverter(), outType, adaptor.bias(),
                                   biasVal)))
    return rewriter.notifyMatchFailure(op, "Failed to reshape bias");

  double eps;
  if (!matchPattern(op.eps(), m_TorchConstantFloat(&eps)))
    return rewriter.notifyMatchFailure(op, "eps must be a scalar constant");

  auto epsilonConst =
      mlir::tosa::getTosaConstTensorSingleF32(rewriter, op, eps);

  auto batchNorm =
      computeBatchNorm(op, rewriter, outType, adaptor.input(), varianceVal,
                       epsilonConst, meanVal, weightVal, biasVal);

  rewriter.replaceOp(op, {batchNorm});

  return success();
}

// This lowering is loosely based on Torch to LinAlg lowering.
template <>
LogicalResult ConvertAtenOp<AtenNativeLayerNormOp>::matchAndRewrite(
    AtenNativeLayerNormOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // The key difference from BatchNorm is that a specified set of dims
  // (normalized_shape) are chosen to compute the mean and variance from the
  // input, whereas in BatchNorm the mean and variance are operands.
  // tosa::ReduceSumOp is used to sum up these dims for mean and for variance.
  // The results are eventually reshaped for broadcasting.

  // Not a ranked tensor output
  if (!adaptor.input().getType().dyn_cast<RankedTensorType>())
    return rewriter.notifyMatchFailure(
        op, "Only ranked tensor types are supported");

  auto inputType = adaptor.input().getType().cast<RankedTensorType>();
  if (inputType.getRank() > 4)
    return rewriter.notifyMatchFailure(op,
                                       "Only up to 4D tensors are supported");

  auto outType = getTypeConverter()->convertType(op.getType(0));

  // Note: cudnn_enabled is not handled.

  // FIXME: Handle the None cases for the optional parameters.
  if (adaptor.weight().getType().isa<Torch::NoneType>())
    return rewriter.notifyMatchFailure(op, "Unsupported None for weight");
  if (adaptor.bias().getType().isa<Torch::NoneType>())
    return rewriter.notifyMatchFailure(op, "Unsupported None for bias");

  auto weightType = adaptor.weight().getType().cast<RankedTensorType>();
  auto biasType = adaptor.bias().getType().cast<RankedTensorType>();
  int64_t inputRank = inputType.getRank();
  Type elemTy = inputType.getElementType();

  // Check if all the arguments meet the requirements.
  SmallVector<int64_t> normalizedShapeSizesInt;
  if (!matchPattern(op.normalized_shape(),
                    m_TorchListOfConstantInts(normalizedShapeSizesInt))) {
    return rewriter.notifyMatchFailure(op, "Unimplemented normalized_shape "
                                           "not constructed from ListConstruct");
  }
  int64_t normalizedShapeRank = normalizedShapeSizesInt.size();
  if (weightType.getRank() != normalizedShapeRank ||
      biasType.getRank() != normalizedShapeRank ||
      inputRank < normalizedShapeRank || normalizedShapeRank < 1)
    return rewriter.notifyMatchFailure(op, "Input or weight or bias shape or "
                                           "normalized shape not compatible");

  // Check all the dimensions match the normalized_shape, only static shapes
  // as of now
  int64_t meanAndVarShapeRank = inputRank - normalizedShapeSizesInt.size();
  for (auto en : llvm::enumerate((normalizedShapeSizesInt))) {
    int64_t index = en.index();
    int64_t value = en.value();
    if (inputType.getShape()[index + meanAndVarShapeRank] != value ||
        weightType.getShape()[index] != value ||
        biasType.getShape()[index] != value)
      return rewriter.notifyMatchFailure(op,
                                         "mismatching contracting dimension");
  }
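
  // Illustrative example (not from the original source): for an input of
  // shape {2, 4, 6, 8} with normalized_shape {6, 8}, meanAndVarShapeRank is
  // 2; mean and variance are reduced over the last two dims and reshaped to
  // {2, 4, 1, 1}, while weight and bias of shape {6, 8} are reshaped to
  // {1, 1, 6, 8} so all operands broadcast against the input.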

  // Helper for computing mean and variance.
  auto computeSumAndReshape = [&](Value toReduce, RankedTensorType toReduceType,
                                  Type outType, SmallVector<int64_t> outShape) {
    Value sumDiv = toReduce;
    SmallVector<int64_t> toReduceShape(toReduceType.getShape().begin(),
                                       toReduceType.getShape().end());
    for (int64_t i = toReduceShape.size() - 1; i >= meanAndVarShapeRank; i--) {
      toReduceShape[i] = 1;
      sumDiv = rewriter.create<tosa::ReduceSumOp>(
          op.getLoc(),
          RankedTensorType::get(toReduceShape, inputType.getElementType()),
          sumDiv, rewriter.getI64IntegerAttr(i));
    }

    return rewriter.create<tosa::ReshapeOp>(op.getLoc(), outType, sumDiv,
                                            rewriter.getI64ArrayAttr(outShape));
  };

  // TOSA's Div is integer-only, so compute the reciprocal of the element
  // count and use it in a mul instead.
  int64_t elemCnt = 1;
  for (auto i : normalizedShapeSizesInt)
    elemCnt *= i;

  auto elemCntConst =
      tosa::getConstTensor<float>(rewriter, op.getOperation(),
                                  {static_cast<float>(elemCnt)}, {1})
          .value();
  Value elemCntRcp = rewriter.create<tosa::ReciprocalOp>(
      op.getLoc(), elemCntConst.getType(), elemCntConst);

  // Broadcast type and shape for various intermediate values.
  SmallVector<int64_t> bcastOutShape;
  for (auto en : llvm::enumerate(inputType.getShape())) {
    bcastOutShape.push_back(
        static_cast<int64_t>(en.index()) >= meanAndVarShapeRank ? 1
                                                                : en.value());
  }
  auto bcastOutType = RankedTensorType::get(bcastOutShape, elemTy);

  // Compute mean.
  Value sum = computeSumAndReshape(adaptor.input(), inputType, bcastOutType,
                                   bcastOutShape);
  Value meanVal = rewriter.create<tosa::MulOp>(op.getLoc(), bcastOutType, sum,
                                               elemCntRcp, /*shift=*/0);

  // Compute variance.
  Value squareSumSub = rewriter.create<tosa::SubOp>(op.getLoc(), inputType,
                                                    adaptor.input(), meanVal);
  Value squareSum = rewriter.create<tosa::MulOp>(op.getLoc(), inputType,
                                                 squareSumSub, squareSumSub, 0);

  Value squareSumReduced =
      computeSumAndReshape(squareSum, inputType, bcastOutType, bcastOutShape);
  Value varianceVal = rewriter.create<tosa::MulOp>(
      op.getLoc(), bcastOutType, squareSumReduced, elemCntRcp, /*shift=*/0);

  // Reshape weight and bias.
  SmallVector<int64_t> weightAndBiasBcastShape;
  for (auto en : llvm::enumerate(inputType.getShape())) {
    weightAndBiasBcastShape.push_back(
        static_cast<int64_t>(en.index()) < meanAndVarShapeRank ? 1
                                                               : en.value());
  }
  auto weightAndMeanBcastType =
      RankedTensorType::get(weightAndBiasBcastShape, elemTy);

  Value weightVal = rewriter.create<tosa::ReshapeOp>(
      op.getLoc(), weightAndMeanBcastType, adaptor.weight(),
      rewriter.getI64ArrayAttr(weightAndBiasBcastShape));

  Value biasVal = rewriter.create<tosa::ReshapeOp>(
      op.getLoc(), weightAndMeanBcastType, adaptor.bias(),
      rewriter.getI64ArrayAttr(weightAndBiasBcastShape));

  double eps;
  if (!matchPattern(op.eps(), m_TorchConstantFloat(&eps)))
    return rewriter.notifyMatchFailure(op, "eps must be a scalar constant");
  auto epsilonConst =
      mlir::tosa::getTosaConstTensorSingleF32(rewriter, op, eps);

  // Compute layer norm.
  auto layerNorm =
      computeBatchNorm(op, rewriter, outType, adaptor.input(), varianceVal,
                       epsilonConst, meanVal, weightVal, biasVal);

  rewriter.replaceOp(op, {layerNorm, meanVal, varianceVal});

  return success();
}

// Torch constants are converted to tosa.const.
template <>
LogicalResult ConvertAtenOp<ValueTensorLiteralOp>::matchAndRewrite(
    ValueTensorLiteralOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto outputTy = getTypeConverter()
                      ->convertType(op.getType())
                      .template cast<RankedTensorType>();

  // Tensors with integer types need to be converted to signless integer
  // element type. All tensors with element types other than integer can reuse
  // existing elements attribute.
  // TODO: what about unsigned integer?
  if (auto elements = op.valueAttr().dyn_cast<DenseIntElementsAttr>()) {
    if (elements.getElementType().isSignedInteger()) {
      Type builtinTensorElemTy = outputTy.getElementType();
      unsigned bitWidth = builtinTensorElemTy.getIntOrFloatBitWidth();
      DenseElementsAttr valueAttr =
          elements.mapValues(builtinTensorElemTy, [&](const APInt &v) {
            return APInt(bitWidth, v.getSExtValue());
          });
      rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, outputTy, valueAttr);
      return success();
    }
  }
  rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, outputTy, adaptor.value());
  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenFlattenUsingIntsOp>::matchAndRewrite(
    AtenFlattenUsingIntsOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a ranked tensor type
  auto selfType = adaptor.self().getType().dyn_cast<RankedTensorType>();
  if (!selfType || !selfType.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op,
        "Only ranked tensor types with static shapes are currently supported");

  int64_t selfRank = selfType.getRank();

  int64_t start_dim, end_dim;

  if (!matchPattern(op.start_dim(), m_TorchConstantInt(&start_dim)))
    return rewriter.notifyMatchFailure(op,
                                       "start_dim must be a Scalar constant");
  start_dim = toPositiveDim(start_dim, selfRank);

  if (!matchPattern(op.end_dim(), m_TorchConstantInt(&end_dim)))
    return rewriter.notifyMatchFailure(op, "end_dim must be a Scalar constant");
  end_dim = toPositiveDim(end_dim, selfRank);

  if (selfRank > 0 && !isValidDim(start_dim, selfRank))
    return rewriter.notifyMatchFailure(op, "start_dim is statically invalid");
  if (selfRank > 0 && !isValidDim(end_dim, selfRank))
    return rewriter.notifyMatchFailure(op, "end_dim is statically invalid");
  if (end_dim < start_dim)
    return rewriter.notifyMatchFailure(op,
                                       "end_dim must be larger than start_dim");

  SmallVector<int64_t> newShape;
  for (auto s : llvm::enumerate(selfType.getShape())) {
    int64_t idx = s.index();
    if (idx < start_dim || idx > end_dim) {
      newShape.push_back(s.value());
    } else {
      if (idx == start_dim)
        newShape.push_back(s.value());
      else
        newShape.back() *= s.value();
    }
  }
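
  // Illustrative example (not from the original source): for a {2, 3, 4, 5}
  // input with start_dim = 1 and end_dim = 2, the loop above produces the
  // flattened shape {2, 12, 5}.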

  // Handle the Scalar case
  if (newShape.size() == 0)
    newShape.push_back(1);

  auto newType = RankedTensorType::get(newShape, selfType.getElementType());
  auto reshapeOp = rewriter.create<tosa::ReshapeOp>(
      op.getLoc(), newType, adaptor.self(), rewriter.getI64ArrayAttr(newShape));

  rewriter.replaceOpWithNewOp<tensor::CastOp>(
      op, getTypeConverter()->convertType(op.getType()), reshapeOp);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenPermuteOp>::matchAndRewrite(
    AtenPermuteOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a ranked tensor type
  auto selfType = adaptor.self().getType().dyn_cast<RankedTensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op,
        "Only ranked tensor types with static shapes are currently supported");

  SmallVector<int64_t> dimListInt;
  if (!matchPattern(adaptor.dims(), m_TorchListOfConstantInts(dimListInt)))
    return rewriter.notifyMatchFailure(
        op, "Only constant dimensions are currently supported");

  int64_t selfRank = selfType.getRank();
  // TODO: If this is already verified on the op then we can drop checking
  // here.
  for (auto &d : dimListInt) {
    d = toPositiveDim(d, selfRank);
    if (!isValidDim(d, selfRank))
      return rewriter.notifyMatchFailure(op, "Not all dims are valid");
  }

  auto transposeDimsConst = mlir::tosa::getConstTensor<int64_t>(
      rewriter, op.getOperation(), dimListInt, {selfRank});

  rewriter.replaceOpWithNewOp<tosa::TransposeOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.self(),
      transposeDimsConst.value());

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenLog2Op>::matchAndRewrite(
    AtenLog2Op op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");
  // Constant value of ln2.
  SmallVector<int64_t> ln2Shape(selfType.getRank(), 1);
  auto ln2Op =
      tosa::getConstTensor<float>(rewriter, op, {0.69314718056}, ln2Shape)
          .value();
  auto rcpOp =
      rewriter.create<tosa::ReciprocalOp>(op.getLoc(), ln2Op.getType(), ln2Op);

  auto outType = getTypeConverter()->convertType(op.getType());
  auto logOp =
      rewriter.create<tosa::LogOp>(op.getLoc(), outType, adaptor.self());
  rewriter.replaceOpWithNewOp<tosa::MulOp>(op, outType, logOp, rcpOp,
                                           /*shift=*/0);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenThresholdOp>::matchAndRewrite(
    AtenThresholdOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");

  auto selfElemTy = selfType.getElementType();
  if (!selfElemTy.isIntOrFloat())
    return rewriter.notifyMatchFailure(
        op, "Only floating-point or integer datatype legalization supported");

  // Integer types with width > 32 are not supported
  auto selfIntType = selfElemTy.dyn_cast<IntegerType>();
  if (selfIntType && selfIntType.getWidth() > 32) {
    return rewriter.notifyMatchFailure(
        op, "Integer types with width greater than 32 are not supported");
  }

  SmallVector<int64_t> constTypeShape(selfType.getRank(), 1);
  Value threshold, value;
  if (failed(torchScalarToTosaTensor(rewriter, op, op.threshold(), threshold,
                                     selfElemTy, constTypeShape)))
    return rewriter.notifyMatchFailure(
        op, "Only scalar constant is supported for threshold");

  if (failed(torchScalarToTosaTensor(rewriter, op, op.value(), value,
                                     selfElemTy, constTypeShape)))
    return rewriter.notifyMatchFailure(
        op, "Only scalar constant is supported for value");

  // Threshold only clamps the upper values. tosa::ClampOp has the same
  // value for both threshold and clamped value so cannot be used.
  auto outType = getTypeConverter()->convertType(op.getType());

  auto cmpOp = rewriter.create<tosa::GreaterOp>(
      op.getLoc(),
      RankedTensorType::get(selfType.getShape(), rewriter.getIntegerType(1)),
      adaptor.self(), threshold);

  rewriter.replaceOpWithNewOp<tosa::SelectOp>(op, outType, cmpOp,
                                              adaptor.self(), value);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenUnsqueezeOp>::matchAndRewrite(
    AtenUnsqueezeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType) {
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");
  }

  auto selfRank = selfType.getRank();
  auto selfElemTy = selfType.getElementType();
  if (!selfElemTy.isIntOrFloat()) {
    return rewriter.notifyMatchFailure(
        op, "Only floating-point or integer datatype legalization supported");
  }

  int64_t dim;
  if (!matchPattern(op.dim(), m_TorchConstantInt(&dim)))
    return rewriter.notifyMatchFailure(op, "dim must be a Scalar constant");

  dim = toPositiveDim(dim, selfRank);
  if (!isValidDim(dim, selfRank + 1))
    return rewriter.notifyMatchFailure(op, "dim is statically invalid");

  SmallVector<int64_t> outShape;
  for (auto en : llvm::enumerate(selfType.getShape())) {
    if (static_cast<int64_t>(en.index()) == dim)
      outShape.push_back(1);

    outShape.push_back(en.value());
  }

  rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.self(),
      rewriter.getI64ArrayAttr(outShape));

  return success();
}
|
|
|
|
|
|
|
|
template <>
|
|
|
|
LogicalResult ConvertAtenOp<AtenContiguousOp>::matchAndRewrite(
|
|
|
|
AtenContiguousOp op, OpAdaptor adaptor,
|
|
|
|
ConversionPatternRewriter &rewriter) const {
|
|
|
|
|
|
|
|
// Not a tensor type.
|
|
|
|
auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
|
|
|
|
if (!selfType)
|
2022-07-18 09:39:54 +08:00
|
|
|
return rewriter.notifyMatchFailure(
|
|
|
|
op, "Only tensor types are currently supported");
|
2022-03-26 05:15:07 +08:00
|
|
|
|
|
|
|
// FIXME: memory_format is not handled.
|
|
|
|
|
|
|
|
rewriter.replaceOp(op, adaptor.self());
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}

template <>
LogicalResult ConvertAtenOp<AtenDropoutOp>::matchAndRewrite(
    AtenDropoutOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.input().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");

  // FIXME: train and p are not handled.

  bool train;
  if (!matchPattern(op.train(), m_TorchConstantBool(&train)))
    return rewriter.notifyMatchFailure(op, "train must be a Scalar constant");

  if (train)
    return rewriter.notifyMatchFailure(op, "train must be false");

  rewriter.replaceOpWithNewOp<tosa::CastOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.input());

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenViewOp>::matchAndRewrite(
    AtenViewOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");

  auto selfElemTy = selfType.getElementType();
  if (!selfElemTy.isIntOrFloat()) {
    return rewriter.notifyMatchFailure(
        op, "Only floating-point or integer datatype legalization supported");
  }

  SmallVector<int64_t> outShape;
  if (!matchPattern(op.size(), m_TorchListOfConstantInts(outShape)))
    return rewriter.notifyMatchFailure(op,
                                       "size must consist of Scalar constants");

  rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.self(),
      rewriter.getI64ArrayAttr(outShape));

  return success();
}

static Value approximateErfOp(ConversionPatternRewriter &rewriter,
                              Operation *op, Value x) {
  // Using the polynomial approximation from
  // https://en.wikipedia.org/wiki/Error_function#Numerical_approximations with
  // maximum error of 5 x 10^-4, where a1 = 0.278393, a2 = 0.230389,
  // a3 = 0.000972, a4 = 0.078108.
  //
  // Erf(x) = 1 - 1 / (1 + a1*x + a2*x^2 + a3*x^3 + a4*x^4)^4, for x >= 0.
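  //
  // For example, at x = 1 this approximation gives roughly 0.8427, matching
  // erf(1) ~= 0.8427 to within the stated 5e-4 error bound.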

  auto outType = x.getType().cast<TensorType>();
  auto loc = op->getLoc();
  auto absX = rewriter.create<tosa::AbsOp>(loc, outType, x);
  auto zero = tosa::getConstTensor<float>(rewriter, op, 0, {}).value();
  auto one = tosa::getConstTensor<float>(rewriter, op, 1, {}).value();

  auto a1 = tosa::getConstTensor<float>(rewriter, op, 0.278393, {}).value();
  auto a1X = rewriter.create<tosa::MulOp>(loc, outType, a1, absX, /*shift=*/0);
  auto sum = rewriter.create<tosa::AddOp>(loc, outType, a1X, one);

  auto a2 = tosa::getConstTensor<float>(rewriter, op, 0.230389, {}).value();
  auto x2 = rewriter.create<tosa::MulOp>(loc, outType, absX, absX, /*shift=*/0);
  auto a2X = rewriter.create<tosa::MulOp>(loc, outType, a2, x2, /*shift=*/0);
  sum = rewriter.create<tosa::AddOp>(loc, outType, sum, a2X);

  auto a3 = tosa::getConstTensor<float>(rewriter, op, 0.000972, {}).value();
  auto x3 = rewriter.create<tosa::MulOp>(loc, outType, x2, absX, /*shift=*/0);
  auto a3X = rewriter.create<tosa::MulOp>(loc, outType, a3, x3, /*shift=*/0);
  sum = rewriter.create<tosa::AddOp>(loc, outType, sum, a3X);

  auto a4 = tosa::getConstTensor<float>(rewriter, op, 0.078108, {}).value();
  auto x4 = rewriter.create<tosa::MulOp>(loc, outType, x3, absX, /*shift=*/0);
  auto a4X = rewriter.create<tosa::MulOp>(loc, outType, a4, x4, /*shift=*/0);
  sum = rewriter.create<tosa::AddOp>(loc, outType, sum, a4X);

  auto rcprl = rewriter.create<tosa::ReciprocalOp>(loc, outType, sum);
  auto rcprl2 =
      rewriter.create<tosa::MulOp>(loc, outType, rcprl, rcprl, /*shift=*/0);
  auto rcprl4 =
      rewriter.create<tosa::MulOp>(loc, outType, rcprl2, rcprl2, /*shift=*/0);
  auto erf = rewriter.create<tosa::SubOp>(loc, outType, one, rcprl4);

  // Deal with negative x.
  auto cond = rewriter.create<tosa::GreaterEqualOp>(
      loc,
      RankedTensorType::get(outType.getShape(), rewriter.getIntegerType(1)), x,
      zero);
  auto negateErf = rewriter.create<tosa::NegateOp>(loc, outType, erf);

  return rewriter.create<tosa::SelectOp>(loc, outType, cond, erf, negateErf);
}
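
// The unit normal CDF below is Phi(x) = 0.5 * (1 + erf((x - mean) / (sigma *
// sqrt(2)))) with mean = 0 and sigma = 1; 0.70710678 approximates 1/sqrt(2).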
static Value buildUnitNormalCdf(ConversionPatternRewriter &rewriter,
                                Operation *op, Value x) {
  auto zero = tosa::getConstTensor<float>(rewriter, op, 0, {}).value();
  auto one = tosa::getConstTensor<float>(rewriter, op, 1, {}).value();
  auto loc = op->getLoc();

  // buildNormalCdf, mean = zero, sigma = one
  auto outType = x.getType();
  auto mean = zero;
  Value xMinusMean = rewriter.create<tosa::SubOp>(loc, outType, x, mean);
  // rsqrt of 2
  Value rsqrt2 =
      tosa::getConstTensor<float>(rewriter, op, 0.70710678, {}).value();
  Value erfArg = rewriter.create<tosa::MulOp>(loc, outType, xMinusMean, rsqrt2,
                                              /*shift=*/0);
  Value erf = approximateErfOp(rewriter, op, erfArg);
  Value erfPlus1 = rewriter.create<tosa::AddOp>(loc, outType, one, erf);
  Value oneHalf = tosa::getConstTensor<float>(rewriter, op, 0.5, {}).value();
  Value normalCdf = rewriter.create<tosa::MulOp>(loc, outType, oneHalf,
                                                 erfPlus1, /*shift=*/0);
  return normalCdf;
}
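
// GELU is lowered as gelu(x) = x * Phi(x), with Phi the unit normal CDF
// built above.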
// This lowering is based on Torch to LinAlg lowering.
template <>
LogicalResult ConvertAtenOp<AtenGeluOp>::matchAndRewrite(
    AtenGeluOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");

  auto selfElemTy = selfType.getElementType();
  if (!selfElemTy.isa<mlir::FloatType>()) {
    return rewriter.notifyMatchFailure(
        op, "Only floating-point datatype legalization supported");
  }

  // TODO: Handle approximate.
  std::string approximate;
  if (!matchPattern(op.approximate(), m_TorchConstantStr(approximate)) ||
      approximate != "none") {
    return rewriter.notifyMatchFailure(op, "Unsupported value of approximate");
  }

  Value cdf = buildUnitNormalCdf(rewriter, op, adaptor.self());
  rewriter.replaceOpWithNewOp<tosa::MulOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.self(), cdf,
      /*shift=*/0);

  return success();
}
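
// The backward pass multiplies grad_output by d/dx gelu(x) = Phi(x) +
// x * phi(x), where phi is the standard normal density; kAlpha * 0.5 below is
// 1/sqrt(2*pi), the density's normalization constant.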
// This lowering is based on Torch to LinAlg lowering.
template <>
LogicalResult ConvertAtenOp<AtenGeluBackwardOp>::matchAndRewrite(
    AtenGeluBackwardOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types are currently supported");

  auto selfElemTy = selfType.getElementType();
  if (!selfElemTy.isa<mlir::FloatType>()) {
    return rewriter.notifyMatchFailure(
        op, "Only floating-point datatype legalization supported");
  }

  // TODO: Handle approximate.
  std::string approximate;
  if (!matchPattern(op.approximate(), m_TorchConstantStr(approximate)) ||
      approximate != "none") {
    return rewriter.notifyMatchFailure(op, "Unsupported value of approximate");
  }

  auto loc = op->getLoc();

  const double cstAlpha0 = 1.12837916709551257390;
  const double cstAlpha1 = 0.70710678118654752440;
  const double oneHalf = 0.5;
  const double kAlpha = cstAlpha0 * cstAlpha1;

  Value kAlphaHalf =
      tosa::getConstTensor<float>(rewriter, op, kAlpha * oneHalf, {}).value();
  Value negOneHalf =
      tosa::getConstTensor<float>(rewriter, op, -0.5, {}).value();
  Value inputSquared = rewriter.create<tosa::MulOp>(
      loc, selfType, adaptor.self(), adaptor.self(), /*shift=*/0);
  Value negHalfInputSquared = rewriter.create<tosa::MulOp>(
      loc, selfType, inputSquared, negOneHalf, /*shift=*/0);
  Value dinput =
      rewriter.create<tosa::ExpOp>(loc, selfType, negHalfInputSquared);
  Value cdf = buildUnitNormalCdf(rewriter, op, adaptor.self());
  Value dinputInput = rewriter.create<tosa::MulOp>(loc, selfType, dinput,
                                                   adaptor.self(), /*shift=*/0);
  Value dinputInputAlpha = rewriter.create<tosa::MulOp>(
      loc, selfType, dinputInput, kAlphaHalf, /*shift=*/0);
  Value cdfExt =
      rewriter.create<tosa::AddOp>(loc, selfType, dinputInputAlpha, cdf);
  rewriter.replaceOpWithNewOp<tosa::MulOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.grad_output(),
      cdfExt,
      /*shift=*/0);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenEmbeddingOp>::matchAndRewrite(
    AtenEmbeddingOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  Value weight = adaptor.weight();
  Value indices = adaptor.indices();
  RankedTensorType outType =
      typeConverter->convertType(op.getType()).cast<RankedTensorType>();

  auto indicesType = indices.getType().dyn_cast<RankedTensorType>();
  if (!indicesType || !indicesType.getElementType().isa<IntegerType>())
    return rewriter.notifyMatchFailure(
        op, "Indices must be of integer tensor type");

  if (indicesType.getRank() != 2)
    return rewriter.notifyMatchFailure(op, "indices must be of rank 2");

  auto weightType = weight.getType().cast<RankedTensorType>();
  if (weightType.getRank() != 2)
    return op.emitError("weight must be of rank 2");

  // FIXME: padding_idx, scale_grad_by_freq and sparse are not handled yet.
  int64_t paddingIdx;
  if (!matchPattern(op.padding_idx(), m_TorchConstantInt(&paddingIdx)))
    return rewriter.notifyMatchFailure(
        op, "only supports constant int padding_idx for embedding op");

  bool scaleGradByFreq;
  if (!matchPattern(op.scale_grad_by_freq(),
                    m_TorchConstantBool(&scaleGradByFreq)))
    return rewriter.notifyMatchFailure(
        op, "only supports constant bool scale_grad_by_freq for embedding op");
  if (scaleGradByFreq)
    return rewriter.notifyMatchFailure(
        op,
        "only supports scale_grad_by_freq equals to False for embedding op");

  bool isSparse;
  if (!matchPattern(op.sparse(), m_TorchConstantBool(&isSparse)))
    return rewriter.notifyMatchFailure(
        op, "only supports constant bool sparse for embedding op");
  if (isSparse)
    return rewriter.notifyMatchFailure(
        op, "only support sparse equals to False for embedding op");

  // For inference:
  //   Weights [num_embeddings, embedding_dim], Indices [X, Y]
  //   Output [X, Y, embedding_dim] = Weights[Indices[x, y]] forall x in X,
  //   y in Y
  //
  //   Condition: num_embeddings > Indices [x, y] forall x in X, y in Y
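  //
  // For example, Weights of shape [1000, 64] with Indices of shape [8, 16]
  // are reshaped below to [1, 1000, 64] and [1, 128]; tosa.gather then yields
  // [1, 128, 64], which is reshaped back to the [8, 16, 64] result.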

  // Reshape the weight, since tosa.gather expects a 3D tensor
  auto indicesShape = indicesType.getShape();
  auto weightShape = weightType.getShape();

  SmallVector<int64_t> newWeightShape = {1};
  for (auto s : weightShape)
    newWeightShape.push_back(s);

  auto reshapedWeight = rewriter.create<tosa::ReshapeOp>(
      op->getLoc(),
      RankedTensorType::get(newWeightShape, weightType.getElementType()),
      weight, rewriter.getI64ArrayAttr(newWeightShape));

  int64_t numIndices = 1;
  if (indicesType.hasStaticShape()) {
    for (auto s : indicesShape)
      numIndices *= s;
  } else {
    numIndices = ShapedType::kDynamicSize;
  }

  SmallVector<int64_t> newIndicesShape = {1, numIndices};
  auto reshapedIndices = rewriter.create<tosa::ReshapeOp>(
      op->getLoc(),
      RankedTensorType::get(newIndicesShape, indicesType.getElementType()),
      indices, rewriter.getI64ArrayAttr(newIndicesShape));

  auto castIndices = rewriter.create<tosa::CastOp>(
      op->getLoc(),
      RankedTensorType::get(newIndicesShape, rewriter.getIntegerType(32)),
      reshapedIndices);

  SmallVector<int64_t> intermediateOutShape = {1, numIndices, weightShape[1]};
  auto gatherOp = rewriter.create<tosa::GatherOp>(
      op->getLoc(),
      RankedTensorType::get(intermediateOutShape, weightType.getElementType()),
      reshapedWeight, castIndices);

  rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
      op, outType, gatherOp, rewriter.getI64ArrayAttr(outType.getShape()));

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenTransposeIntOp>::matchAndRewrite(
    AtenTransposeIntOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(op, "Only tensor types are supported");

  // Only statically resolvable values are currently supported
  int64_t dim0, dim1;
  if (!matchPattern(op.dim0(), m_TorchConstantInt(&dim0)))
    return rewriter.notifyMatchFailure(op, "dim0 must be a Scalar constant");

  if (!matchPattern(op.dim1(), m_TorchConstantInt(&dim1)))
    return rewriter.notifyMatchFailure(op, "dim1 must be a Scalar constant");

  dim0 = toPositiveDim(dim0, selfType.getRank());
  dim1 = toPositiveDim(dim1, selfType.getRank());

  auto selfRank = selfType.getRank();
  if (!isValidDim(dim0, selfRank) || !isValidDim(dim1, selfRank))
    return rewriter.notifyMatchFailure(
        op, "dim0 and dim1 must be less than tensor rank");

  SmallVector<int32_t> transposeDims;
  for (auto i = 0; i < selfType.getRank(); ++i)
    transposeDims.push_back(i);

  transposeDims[dim0] = dim1;
  transposeDims[dim1] = dim0;
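
  // E.g. swapping dims 1 and 3 of a rank-4 tensor yields the permutation
  // {0, 3, 2, 1}, which is passed to tosa.transpose as a constant tensor.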

  auto transposeDimsConst = mlir::tosa::getConstTensor<int32_t>(
      rewriter, op.getOperation(), transposeDims, {selfType.getRank()});

  rewriter.replaceOpWithNewOp<tosa::TransposeOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.self(),
      transposeDimsConst.value());

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenMaxDimOp>::matchAndRewrite(
    AtenMaxDimOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(op, "Only tensor types are supported");

  auto indicesType =
      getTypeConverter()->convertType(op.getType(1)).dyn_cast<TensorType>();
  if (!indicesType)
    return rewriter.notifyMatchFailure(op, "Only tensor types are supported");

  auto selfElemType = selfType.getElementType();
  auto indicesElemType = indicesType.getElementType();

  // Only statically deducible values are currently supported
  int64_t dim;
  if (!matchPattern(op.dim(), m_TorchConstantInt(&dim)))
    return rewriter.notifyMatchFailure(op, "dim must be a Scalar constant");

  dim = toPositiveDim(dim, selfType.getRank());

  if (!isValidDim(dim, selfType.getRank()))
    return rewriter.notifyMatchFailure(op, "dim must be less than tensor rank");

  bool keepDim;
  if (!matchPattern(op.keepdim(), m_TorchConstantBool(&keepDim)))
    return rewriter.notifyMatchFailure(op, "keepdim must be a Scalar constant");

  SmallVector<int64_t> reducedShape, prunedShape;
  for (auto en : llvm::enumerate(selfType.getShape())) {
    if (static_cast<int64_t>(en.index()) == dim) {
      reducedShape.push_back(1);
      continue;
    }
    reducedShape.push_back(en.value());
    prunedShape.push_back(en.value());
  }
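
  // reducedShape keeps the reduced dimension as size 1 (tosa.reduce_max
  // semantics), while prunedShape drops it entirely (the tosa.argmax output
  // and the keepdim == false result).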

  auto dimAttr = rewriter.getIntegerAttr(rewriter.getI64Type(), dim);
  auto prunedShapeAttr = rewriter.getI64ArrayAttr(prunedShape);

  Value reduceMax = rewriter.create<tosa::ReduceMaxOp>(
      op->getLoc(), RankedTensorType::get(reducedShape, selfElemType),
      adaptor.self(), dimAttr);

  Value argMax = rewriter.create<tosa::ArgMaxOp>(
      op->getLoc(), RankedTensorType::get(prunedShape, indicesElemType),
      adaptor.self(), dimAttr);

  if (argMax.getType() != indicesType) {
    argMax = rewriter.create<tosa::ReshapeOp>(
        op->getLoc(), indicesType, argMax,
        rewriter.getI64ArrayAttr(reducedShape));
  }

  if (!keepDim) {
    reduceMax = rewriter.create<tosa::ReshapeOp>(
        op->getLoc(), RankedTensorType::get(prunedShape, selfElemType),
        reduceMax, prunedShapeAttr);
  }

  rewriter.replaceOp(op, {reduceMax, argMax});

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenSliceTensorOp>::matchAndRewrite(
    AtenSliceTensorOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType || !selfType.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op, "Only tensor types with static shape are supported");

  // Only statically deducible values are currently supported
  int64_t dim;
  if (!matchPattern(op.dim(), m_TorchConstantInt(&dim)))
    return rewriter.notifyMatchFailure(op, "dim must be a Scalar constant");

  dim = toPositiveDim(dim, selfType.getRank());

  if (!isValidDim(dim, selfType.getRank()))
    return rewriter.notifyMatchFailure(op, "dim must be less than tensor rank");

  int64_t start;
  if (!matchPattern(op.start(), m_TorchConstantInt(&start)))
    return rewriter.notifyMatchFailure(op, "start must be a Scalar constant");

  if (start < 0)
    return rewriter.notifyMatchFailure(op, "Currently unsupported: start < 0");

  int64_t end;
  if (!matchPattern(op.end(), m_TorchConstantInt(&end)))
    return rewriter.notifyMatchFailure(op, "end must be a Scalar constant");

  // FIXME: add support for start/end < 0 and end < start
  if (end < start)
    return rewriter.notifyMatchFailure(op,
                                       "Currently unsupported: end < start");

  int64_t step;
  if (!matchPattern(op.step(), m_TorchConstantInt(&step)))
    return rewriter.notifyMatchFailure(op, "step must be a Scalar constant");

  if (step != 1)
    return rewriter.notifyMatchFailure(
        op, "step value other than 1 is currently unsupported");
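
  // tosa.slice takes a per-dimension start and size; every dimension other
  // than `dim` keeps start 0 and its full extent, while `dim` starts at
  // `start` and spans `end - start` elements.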

  SmallVector<int64_t> startSlice(selfType.getRank(), 0);
  SmallVector<int64_t> sizeSlice = llvm::to_vector(selfType.getShape());

  startSlice[dim] = start;
  sizeSlice[dim] = end - start;

  rewriter.replaceOpWithNewOp<tosa::SliceOp>(
      op, getTypeConverter()->convertType(op.getType()), adaptor.self(),
      rewriter.getI64ArrayAttr(startSlice),
      rewriter.getI64ArrayAttr(sizeSlice));

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenBroadcastToOp>::matchAndRewrite(
    AtenBroadcastToOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType || !selfType.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op, "Only tensor types with static shape are supported");

  auto selfElemTy = selfType.getElementType();
  if (!selfElemTy.isIntOrFloat()) {
    return rewriter.notifyMatchFailure(
        op, "Only floating-point or integer datatype legalization supported");
  }

  SmallVector<int64_t> outShape;
  if (!matchPattern(op.size(), m_TorchListOfConstantInts(outShape)))
    return rewriter.notifyMatchFailure(op,
                                       "size must consist of Scalar constants");

  SmallVector<int64_t> inputShape(selfType.getShape());
  if (inputShape.size() == outShape.size() || inputShape.size() == 0) {
    // Check for the identity case, e.g. [a, b, c] -> [a, b, c]. If it holds,
    // the op result can be replaced with the input operand irrespective of
    // the users of the op result.
    if (!llvm::equal(inputShape, outShape)) {
      for (auto user : op->getResult(0).getUsers()) {
        // This case is only supported if the result of the `broadcast_to` op
        // is not used by a view-like op.
        if (isViewLikeOp(user)) {
          return rewriter.notifyMatchFailure(
              op, "unimplemented: broadcast not supported for this case");
        }
      }
    }
    // If we reach here, the given case is handled by the implicit
    // broadcasting done by TOSA.
    op.replaceAllUsesWith(op.self());
    rewriter.eraseOp(op);
    return success();
  }
  return rewriter.notifyMatchFailure(
      op,
      "unimplemented: broadcasts other than same rank or zero ranked tensor.");
}

template <>
LogicalResult ConvertAtenOp<AtenWhereSelfOp>::matchAndRewrite(
    AtenWhereSelfOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types input are currently supported");
  auto condType = adaptor.condition().getType().dyn_cast<TensorType>();
  if (!condType)
    return rewriter.notifyMatchFailure(
        op, "Only tensor types condition are currently supported");

  auto outType = getTypeConverter()->convertType(op.getType());
  rewriter.replaceOpWithNewOp<tosa::SelectOp>(op, outType, adaptor.condition(),
                                              adaptor.self(), adaptor.other());

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenClampOp>::matchAndRewrite(
    AtenClampOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType)
    return rewriter.notifyMatchFailure(
        op, "only tensor types input are currently supported");

  int64_t int_min, int_max;
  if (!matchPattern(op.min(), m_TorchConstantInt(&int_min)))
    return rewriter.notifyMatchFailure(
        op, "unimplemented: value `int_min` should be a torch constant int");

  if (!matchPattern(op.max(), m_TorchConstantInt(&int_max)))
    return rewriter.notifyMatchFailure(
        op, "unimplemented: value `int_max` should be a torch constant int");
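
  // tosa.clamp carries both integer and floating-point bounds; here the
  // float bounds are simply the integer limits cast to f32.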

  IntegerAttr min_int = rewriter.getI64IntegerAttr(int_min);
  IntegerAttr max_int = rewriter.getI64IntegerAttr(int_max);
  FloatAttr min_fp = rewriter.getF32FloatAttr(float(int_min));
  FloatAttr max_fp = rewriter.getF32FloatAttr(float(int_max));

  auto outType = getTypeConverter()->convertType(op.getType());
  rewriter.replaceOpWithNewOp<tosa::ClampOp>(
      op, outType, adaptor.self(), min_int, max_int, min_fp, max_fp);

  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenArangeStartStepOp>::matchAndRewrite(
    AtenArangeStartStepOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  TypeConverter *typeConverter = this->getTypeConverter();
  RankedTensorType resultType =
      typeConverter->convertType(op->getResult(0).getType())
          .cast<RankedTensorType>();

  // At this point all tensors should have value semantics, and hence the
  // `layout` check can be ignored.

  // TODO: Add support for pin_memory features.
  // The pin_memory should be either `False` or `none`.
  bool pinMemory;
  if (!op.pin_memory().getType().isa<Torch::NoneType>() &&
      (!matchPattern(op.pin_memory(), m_TorchConstantBool(&pinMemory)) ||
       pinMemory)) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: pin_memory must be either None or false");
  }

  int64_t start, step, end;
  if (!matchPattern(op.start(), m_TorchConstantInt(&start)))
    return rewriter.notifyMatchFailure(
        op, "unimplemented: value `start` should be a torch constant int");

  if (!matchPattern(op.end(), m_TorchConstantInt(&end)))
    return rewriter.notifyMatchFailure(
        op, "unimplemented: value `end` should be a torch constant int");

  if (!matchPattern(op.step(), m_TorchConstantInt(&step)))
    return rewriter.notifyMatchFailure(
        op, "unimplemented: value `step` should be a torch constant int");

  // The result will always be a 1-d tensor.
  // The size of the result is calculated as follows:
  //   ceil((end - start) / step)
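  // For example, start = 0, end = 10, step = 3 yields ceil(10 / 3) = 4
  // elements: {0, 3, 6, 9}.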
  int64_t resultShape = ceil((float)(end - start) / (float)step);
  SmallVector<int64_t> values(resultShape, start);
  for (unsigned i = 1; i < resultShape; i++)
    values[i] += i * step;
  Value result =
      tosa::getConstTensor<int64_t>(rewriter, op, values, resultShape).value();

  rewriter.replaceOpWithNewOp<tosa::CastOp>(op, resultType, result);
  return success();
}

template <>
LogicalResult ConvertAtenOp<PrimNumToTensorScalarOp>::matchAndRewrite(
    PrimNumToTensorScalarOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  TypeConverter *typeConverter = this->getTypeConverter();
  RankedTensorType resultType =
      typeConverter->convertType(op->getResult(0).getType())
          .cast<RankedTensorType>();

  // Only supports integer operand type, because for a floating-point operand
  // the result tensor would have to be of type `f64`, which is not supported
  // in TOSA.
  int64_t initValue;
  if (!matchPattern(op.a(), m_TorchConstantInt(&initValue)))
    return rewriter.notifyMatchFailure(
        op, "unimplemented: input should be a torch constant int");

  DenseElementsAttr constAttr = DenseElementsAttr::get(resultType, {initValue});
  rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, resultType, constAttr);
  return success();
}

template <>
LogicalResult ConvertAtenOp<AtenCopyOp>::matchAndRewrite(
    AtenCopyOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  auto srcType = adaptor.src().getType().dyn_cast<TensorType>();
  if (!selfType || !selfType.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op, "Only tensor types with static shape are supported");

  if (!srcType || !srcType.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op, "Only tensor types with static shape are supported");

  // The non_blocking should be a constant `False`.
  bool nonBlocking;
  if (!matchPattern(op.non_blocking(), m_TorchConstantBool(&nonBlocking))) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: non_blocking must be a constant");
  } else if (nonBlocking) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: non_blocking is expected to be false");
  }

  SmallVector<int64_t> selfShape(selfType.getShape());
  SmallVector<int64_t> srcShape(srcType.getShape());

  if (llvm::equal(selfShape, srcShape) || selfShape.size() == 0) {
    // If we reach here, then it means the given case is handled by implicit
    // broadcasting done by tosa.
    Value result;
    if (failed(tosa::tosaCastTensorToType(
            rewriter, op, adaptor.src(),
            getTypeConverter()->convertType(op.getType()), result)))
      return rewriter.notifyMatchFailure(
          op, "unimplemented: cast to result type not supported");
    rewriter.replaceOp(op, result);
    return success();
  }
  return rewriter.notifyMatchFailure(
      op, "unimplemented: valsem.aten.copy op not supported for this case.");
}

// Legalizes the torch.aten.to.dtype op
template <>
LogicalResult ConvertAtenOp<AtenToDtypeOp>::matchAndRewrite(
    AtenToDtypeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {

  // Not a tensor type.
  auto selfType = adaptor.self().getType().dyn_cast<TensorType>();
  if (!selfType || !selfType.hasStaticShape())
    return rewriter.notifyMatchFailure(
        op, "Only tensor types with static shape are supported");

  // The non_blocking arg should be a constant `False`.
  bool nonBlocking;
  if (!matchPattern(op.non_blocking(), m_TorchConstantBool(&nonBlocking))) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: non_blocking arg must be a constant");
  } else if (nonBlocking) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: non_blocking arg is expected to be false");
  }

  // The copy arg should be a constant `False`.
  bool copy;
  if (!matchPattern(op.copy(), m_TorchConstantBool(&copy))) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: copy arg must be a constant");
  } else if (copy) {
    return rewriter.notifyMatchFailure(
        op, "unimplemented: copy arg is expected to be false");
  }

  // Only `none`, `contiguous` and `preserve` memory_format is supported.
  if (!op.memory_format().getType().isa<Torch::NoneType>()) {
    int64_t memoryFormat;
    if (!matchPattern(op.memory_format(), m_TorchConstantInt(&memoryFormat)))
      return rewriter.notifyMatchFailure(
          op, "unimplemented: the memory format should be specified in "
              "an integer constant");
    if (memoryFormat != torch_upstream::MemoryFormat::Contiguous &&
        memoryFormat != torch_upstream::MemoryFormat::Preserve)
      return rewriter.notifyMatchFailure(
          op, "unimplemented: only none, contiguous and preserve "
              "memory_format is supported");
  }

  auto resultTy = getTypeConverter()
                      ->convertType(op.getResult().getType())
                      .cast<RankedTensorType>();

  Value result;
  if (failed(tosa::tosaCastTensorToType(rewriter, op, adaptor.self(), resultTy,
                                        result)))
    return rewriter.notifyMatchFailure(op, "conversion to result type failed");

  rewriter.replaceOp(op, result);
  return success();
}

template <typename AtenOpT, typename TosaOpT>
class ConvertAtenPoolingBaseOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;

  // Different pooling variants need to process inputs differently, e.g.
  // adaptive pooling generates the kernel size rather than receiving it. This
  // function also transposes inputs.
  virtual LogicalResult processInputs(AtenOpT op, OpAdaptor adaptor,
                                      ConversionPatternRewriter &rewriter,
                                      Value &input, ArrayAttr &kernel,
                                      ArrayAttr &stride, ArrayAttr &pad,
                                      Type &outputTy) const {
    return rewriter.notifyMatchFailure(
        op, "Unimplemented pooling input parsing function");
  }
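
  // Computes the output spatial size of a pooling window using the standard
  // formula: out = (in + padBefore + padAfter - dilation * (kernel - 1) - 1) /
  // stride + 1. For example, in = 32, kernel = 3, stride = 2, pad = 1 on each
  // side gives out = 16.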
  static int64_t getOutputDim(int64_t inputDim, int64_t kernelDim,
                              int64_t stride, int64_t padBefore,
                              int64_t padAfter, int64_t dilation) {
    if (inputDim == ShapedType::kDynamicSize) {
      return ShapedType::kDynamicSize;
    } else {
      return (
          (inputDim + padBefore + padAfter - dilation * (kernelDim - 1) - 1) /
              stride +
          1);
    }
  }

  // Apply the transposeDims vector on input to generate a transposed form.
  Value transposeTensor(AtenOpT op, ConversionPatternRewriter &rewriter,
                        Value input, ArrayRef<int32_t> transposeDims) const {
    auto inputTy = input.getType().template cast<RankedTensorType>();
    auto inputElemTy = inputTy.getElementType();
    auto inputShape = inputTy.getShape();
    auto inputRank = inputTy.getRank();

    llvm::Optional<Value> transposeDimsConst = tosa::getConstTensor<int32_t>(
        rewriter, op,
        /*vec=*/transposeDims,
        /*shape=*/{static_cast<int32_t>(inputRank)});

    SmallVector<int64_t> transposedInputShape;
    for (auto &dim : transposeDims)
      transposedInputShape.push_back(inputShape[dim]);
    auto transposedInputType =
        RankedTensorType::get(transposedInputShape, inputElemTy);
    return rewriter
        .create<tosa::TransposeOp>(op->getLoc(), transposedInputType, input,
                                   transposeDimsConst.value())
        .getResult();
  }

  Value transposePoolingInputToHwc(AtenOpT op,
                                   ConversionPatternRewriter &rewriter,
                                   Value input) const {
    auto inputRank =
        input.getType().template cast<RankedTensorType>().getRank();

    SmallVector<int32_t> nchwToNhwc4DTransposeDims({0, 2, 3, 1});
    SmallVector<int32_t> chwToHwc3DTransposeDims({1, 2, 0});

    return transposeTensor(op, rewriter, input,
                           inputRank == 3 ? chwToHwc3DTransposeDims
                                          : nchwToNhwc4DTransposeDims);
  }

  Value transposePoolingOutputToChw(AtenOpT op,
                                    ConversionPatternRewriter &rewriter,
                                    Value input) const {
    auto inputTy = input.getType().template cast<RankedTensorType>();
    auto inputRank = inputTy.getRank();

    SmallVector<int32_t> nhwcToNchw4DTransposeDims({0, 3, 1, 2});
    SmallVector<int32_t> hwcToChw3DTransposeDims({2, 0, 1});

    return transposeTensor(op, rewriter, input,
                           inputRank == 3 ? hwcToChw3DTransposeDims
                                          : nhwcToNchw4DTransposeDims);
  }

  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value input;
    ArrayAttr kernel, stride, pad;
    Type outputTy;

    // Attempts to read input and kernel parameters, or synthesize them in the
    // case of adaptive pooling. Also performs input CHW->HWC transpose.
    if (failed(processInputs(op, adaptor, rewriter, input, kernel, stride, pad,
                             outputTy)))
      return rewriter.notifyMatchFailure(
          op, "Failed to process inputs for pooling");

    auto pooledOutput =
        rewriter
            .create<TosaOpT>(op->getLoc(), outputTy, input, kernel, stride, pad)
            .getResult();

    auto transposedOutput =
        ConvertAtenPoolingBaseOp<AtenOpT, TosaOpT>::transposePoolingOutputToChw(
            op, rewriter, pooledOutput);

    rewriter.replaceOpWithNewOp<tensor::CastOp>(
        op,
        OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
            op.getType()),
        transposedOutput);

    return success();
  }
};

template <typename AtenOpT, typename TosaOpT>
class ConvertAtenAdaptivePoolingOp
    : public ConvertAtenPoolingBaseOp<AtenOpT, TosaOpT> {
public:
  using ConvertAtenPoolingBaseOp<AtenOpT, TosaOpT>::ConvertAtenPoolingBaseOp;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult processInputs(AtenOpT op, OpAdaptor adaptor,
                              ConversionPatternRewriter &rewriter, Value &input,
                              ArrayAttr &kernel, ArrayAttr &stride,
                              ArrayAttr &pad, Type &outputTy) const override {
    auto inputXchw = adaptor.self();
    auto inputTy = inputXchw.getType().template cast<RankedTensorType>();
    if (!inputTy)
      return rewriter.notifyMatchFailure(
          op, "Adaptive avgpool requires ranked tensor input");

    auto inputShape = inputTy.getShape();
    auto inputRank = inputTy.getRank();
    auto inputElemTy = inputTy.getElementType();

    // Rank sanity check.
    if (inputTy.getRank() != 4 && inputRank != 3)
      return rewriter.notifyMatchFailure(
          op, "NCHW->NHWC transpose requires 3D or 4D tensor");

    int64_t inputHDim = inputShape[inputRank - 2];
    int64_t inputWDim = inputShape[inputRank - 1];

    SmallVector<int64_t> outputSize;
    if (!matchPattern(op.output_size(), m_TorchListOfConstantInts(outputSize)))
      return rewriter.notifyMatchFailure(
          op, "Non-const output_size for adaptive pooling unsupported.");

    SmallVector<int64_t> kernelDims;
    int64_t outputHDim, outputWDim;
    if (outputSize.size() == 1) {
      outputHDim = outputWDim = outputSize[0];
    } else {
      if (outputSize.size() != 2)
        return rewriter.notifyMatchFailure(
            op, "Adaptive avgpool output_size not 1 or 2 elements.");

      // Assumes 'None' (e.g. output_size=(None, 5) ) is expressed as <=0.
      outputHDim =
          (outputSize[0] <= 0) ? inputShape[inputRank - 2] : outputSize[0];
      outputWDim =
          (outputSize[1] <= 0) ? inputShape[inputRank - 1] : outputSize[1];
    }

    // In adaptive pooling,
    //   stride = inputDim // outputDim
    //   kernel = inputDim - (outputDim - 1) * stride
    //   pad = 0, dilation = 1
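    //
    // For example, an input H dimension of 7 pooled to an output of 3 uses
    // stride 7 // 3 = 2 and kernel 7 - (3 - 1) * 2 = 3.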

    int64_t strideH = inputShape[inputRank - 2] / outputHDim;
    int64_t strideW = inputShape[inputRank - 1] / outputWDim;

    kernelDims.push_back(inputHDim - (outputHDim - 1) * strideH);
    kernelDims.push_back(inputWDim - (outputWDim - 1) * strideW);

    SmallVector<int64_t> outputShape;
    if (inputRank > 3)
      outputShape.push_back(inputShape[0]);
    outputShape.push_back(outputHDim);
    outputShape.push_back(outputWDim);
    outputShape.push_back(inputShape[inputRank - 3]);

    // Transpose to xHWC
    input =
        ConvertAtenPoolingBaseOp<AtenOpT, TosaOpT>::transposePoolingInputToHwc(
            op, rewriter, inputXchw);
    kernel = rewriter.getI64ArrayAttr(kernelDims);
    stride = rewriter.getI64ArrayAttr({strideH, strideW});
    // Adaptive pooling does unit dilation and zero pad.
    pad = rewriter.getI64ArrayAttr({0, 0, 0, 0});
    outputTy = RankedTensorType::get(outputShape, inputElemTy);

    return success();
  }
};

template <typename AtenOpT, typename tosaOp>
static Type getOutputTypeForNonAdaptivePoolingOp(
    RankedTensorType inputTy, SmallVectorImpl<int64_t> &kernelSize,
    SmallVectorImpl<int64_t> &strideArray, SmallVectorImpl<int64_t> &padArray,
    SmallVectorImpl<int64_t> &dilationArray) {
  auto inputShape = inputTy.getShape();
  auto inputRank = inputTy.getRank();
  auto inputElemTy = inputTy.getElementType();

  int64_t outputHDim = ConvertAtenPoolingBaseOp<AtenOpT, tosaOp>::getOutputDim(
      inputShape[inputRank - 2], kernelSize[0], strideArray[0], padArray[0],
      padArray[0], dilationArray[0]);
  int64_t outputWDim = ConvertAtenPoolingBaseOp<AtenOpT, tosaOp>::getOutputDim(
      inputShape[inputRank - 1], kernelSize[1], strideArray[1], padArray[1],
      padArray[1], dilationArray[1]);
  SmallVector<int64_t> outputShape;
  if (inputRank > 3)
    outputShape.push_back(inputShape[0]);
  outputShape.push_back(outputHDim);
  outputShape.push_back(outputWDim);
  outputShape.push_back(inputShape[inputRank - 3]);
  return RankedTensorType::get(outputShape, inputElemTy);
}

// Checks the validity of pooling parameters and stores them in the respective
// vectors. Also computes the output type for the pooling op.
template <typename AtenOpT, typename tosaOp>
static LogicalResult getOutputTypeAndPoolingParameters(
    AtenOpT op, ConversionPatternRewriter &rewriter, Value inputXchw,
    SmallVectorImpl<int64_t> &dilationArray, Type &outputTy, ArrayAttr &kernel,
    ArrayAttr &stride, ArrayAttr &pad) {

  RankedTensorType inputTy = inputXchw.getType().cast<RankedTensorType>();
  if (!inputTy)
    return rewriter.notifyMatchFailure(
        op, "Pooling op requires ranked tensor input");

  auto inputRank = inputTy.getRank();
  // Rank sanity check.
  if (inputTy.getRank() != 4 && inputRank != 3)
    return rewriter.notifyMatchFailure(
        op, "NCHW->NHWC transpose requires 3D or 4D tensor");

  SmallVector<int64_t, 2> kernelSizeInts, strideInts, paddingInts;
  if (!matchPattern(op.kernel_size(),
                    m_TorchListOfConstantInts(kernelSizeInts)))
    return rewriter.notifyMatchFailure(
        op, "Non-const kernel_size for pooling op unsupported");
  if (!matchPattern(op.stride(), m_TorchListOfConstantInts(strideInts)))
    return rewriter.notifyMatchFailure(
        op, "Non-const stride for pooling op unsupported");
  if (!matchPattern(op.padding(), m_TorchListOfConstantInts(paddingInts)))
    return rewriter.notifyMatchFailure(
        op, "Non-const padding factor for pooling op unsupported");

  kernel = rewriter.getI64ArrayAttr(kernelSizeInts);
  stride = rewriter.getI64ArrayAttr(strideInts);
  pad = rewriter.getI64ArrayAttr(
      {paddingInts[0], paddingInts[0], paddingInts[1], paddingInts[1]});

  // FIXME: add ceil_mode support.
  bool ceilMode;
  if (!matchPattern(op.ceil_mode(), m_TorchConstantBool(&ceilMode)))
    return rewriter.notifyMatchFailure(
        op, "only support constant bool ceil_mode for pooling op");
  if (ceilMode)
    return rewriter.notifyMatchFailure(
        op, "only support ceil_mode equals to False for pooling op");

  outputTy = getOutputTypeForNonAdaptivePoolingOp<AtenOpT, tosaOp>(
      inputTy, kernelSizeInts, strideInts, paddingInts, dilationArray);

  return success();
}

class ConvertAtenMaxPool2dOp
    : public ConvertAtenPoolingBaseOp<AtenMaxPool2dOp, tosa::MaxPool2dOp> {
public:
  using ConvertAtenPoolingBaseOp<AtenMaxPool2dOp,
                                 tosa::MaxPool2dOp>::ConvertAtenPoolingBaseOp;
  LogicalResult processInputs(AtenMaxPool2dOp op, OpAdaptor adaptor,
                              ConversionPatternRewriter &rewriter, Value &input,
                              ArrayAttr &kernel, ArrayAttr &stride,
                              ArrayAttr &pad, Type &outputTy) const override {
    SmallVector<int64_t, 2> dilationArray;
    if (!matchPattern(op.dilation(), m_TorchListOfConstantInts(dilationArray)))
      return rewriter.notifyMatchFailure(
          op, "Non-const dilation for pooling op unsupported.");
    // TOSA pooling only supports unit dilation.
    if (dilationArray[0] > 1 || dilationArray[1] > 1)
      return rewriter.notifyMatchFailure(
          op, "Cannot process non-unit pooling dilation.");

    if (failed(getOutputTypeAndPoolingParameters<AtenMaxPool2dOp,
                                                 tosa::MaxPool2dOp>(
            op, rewriter, adaptor.self(), dilationArray, outputTy, kernel,
            stride, pad)))
      return rewriter.notifyMatchFailure(
          op, "invalid pooling parameters or input type");

    // Transpose to xHWC
    input = ConvertAtenPoolingBaseOp<AtenMaxPool2dOp, tosa::MaxPool2dOp>::
        transposePoolingInputToHwc(op, rewriter, adaptor.self());

    return success();
  }
};

class ConvertAtenAvgPool2dOp
    : public ConvertAtenPoolingBaseOp<AtenAvgPool2dOp, tosa::AvgPool2dOp> {
public:
  using ConvertAtenPoolingBaseOp<AtenAvgPool2dOp,
                                 tosa::AvgPool2dOp>::ConvertAtenPoolingBaseOp;
  LogicalResult processInputs(AtenAvgPool2dOp op, OpAdaptor adaptor,
                              ConversionPatternRewriter &rewriter, Value &input,
                              ArrayAttr &kernel, ArrayAttr &stride,
                              ArrayAttr &pad, Type &outputTy) const override {
    SmallVector<int64_t, 2> dilationArray{1, 1};
    if (failed(getOutputTypeAndPoolingParameters<AtenAvgPool2dOp,
                                                 tosa::AvgPool2dOp>(
            op, rewriter, adaptor.self(), dilationArray, outputTy, kernel,
            stride, pad)))
      return rewriter.notifyMatchFailure(
          op, "invalid pooling parameters or input type");

    // Transpose to xHWC
    input = ConvertAtenPoolingBaseOp<AtenAvgPool2dOp, tosa::AvgPool2dOp>::
        transposePoolingInputToHwc(op, rewriter, adaptor.self());

    return success();
  }
};

// Ref: Error checking based on the Torch to LinAlg lowering
template <typename AtenOpT, int fillVal>
class ConvertAtenConstPatternOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {

    auto outType = OpConversionPattern<AtenOpT>::getTypeConverter()
                       ->convertType(op.getType())
                       .template dyn_cast<TensorType>();

    if (!outType)
      return rewriter.notifyMatchFailure(op,
                                         "Only Tensor types supported in TOSA");

    Type outElemTy = outType.getElementType();
    if (!outElemTy.isIntOrFloat())
      return rewriter.notifyMatchFailure(
          op, "Only floating-point or integer datatype legalization supported");

    // FIXME: Handle layout, device and pin_memory. Assume dtype has been
    // processed to set output type correctly?
    if (!op.layout().getType().template isa<Torch::NoneType>())
      return rewriter.notifyMatchFailure(op,
                                         "Only default layout is supported");

    bool pinMemory;
    if (!op.pin_memory().getType().template isa<Torch::NoneType>() &&
        (!matchPattern(op.pin_memory(), m_TorchConstantBool(&pinMemory)) ||
         pinMemory)) {
      return rewriter.notifyMatchFailure(
          op, "Unsupported pin_memory, should be either None or false");
    }

    SmallVector<int64_t> shape;
    if (!matchPattern(op.size(), m_TorchListOfConstantInts(shape))) {
      return rewriter.notifyMatchFailure(
          op, "Shape must be a list of Scalar constants");
    }

    int64_t size = 1;
    for (auto s : shape)
      size *= s;

    SmallVector<int32_t> values(size, fillVal);
    auto constOp =
        tosa::getConstTensor<int32_t>(rewriter, op, values, shape).value();

    rewriter.replaceOpWithNewOp<tosa::CastOp>(op, outType, constOp);

    return success();
  }
};

template <typename AtenOpT>
class ConvertAtenFillScalarOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto outType = OpConversionPattern<AtenOpT>::getTypeConverter()
                       ->convertType(op.getType())
                       .template dyn_cast<TensorType>();

    if (!outType || !outType.hasStaticShape())
      return rewriter.notifyMatchFailure(
          op, "Only Tensor types with static shapes are currently supported");

    Type outElemTy = outType.getElementType();
    if (!outElemTy.isIntOrFloat()) {
      return rewriter.notifyMatchFailure(
          op, "Only floating-point or integer datatype legalization supported");
    }
    Value constOp;
    if (failed(torchScalarToTosaTensor(rewriter, op, op.value(), constOp,
                                       outElemTy, outType.getShape())))
      return rewriter.notifyMatchFailure(
          op, "Supplied value must be a Scalar constant");

    rewriter.replaceOpWithNewOp<tosa::CastOp>(op, outType, constOp);

    return success();
  }
};

// Legalizes the torch.clone op.
template <typename AtenOpT>
class ConvertAtenCloneOp : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    int64_t memoryFormat;
    if (!op.memory_format().getType().template isa<Torch::NoneType>() &&
        (!matchPattern(op.memory_format(), m_TorchConstantInt(&memoryFormat)) ||
         memoryFormat != torch_upstream::MemoryFormat::Contiguous)) {
      return op.emitError(
          "unimplemented: only default memory format is supported");
    }
    auto outType = OpConversionPattern<AtenOpT>::getTypeConverter()
                       ->convertType(op.getType())
                       .template dyn_cast<TensorType>();
    rewriter.replaceOpWithNewOp<tosa::CastOp>(op, outType, adaptor.self());

    return success();
  }
};

} // namespace

// -----------------------------------------------------------------------------
// TorchToTosa Pass
// -----------------------------------------------------------------------------

namespace {
class ConvertTorchToTosa : public ConvertTorchToTosaBase<ConvertTorchToTosa> {
public:
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<tosa::TosaDialect>();
    registry.insert<tensor::TensorDialect>();
    registry.insert<arith::ArithDialect>();
    TorchConversion::getBackendTypeConversionDependentDialects(registry);
  }

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    ConversionTarget target(*context);
    target.addLegalDialect<tosa::TosaDialect, tensor::TensorDialect,
                           arith::ArithDialect>();

    TypeConverter typeConverter;
    typeConverter.addConversion([](Type type) { return type; });
    TorchConversion::setupBackendTypeConversion(target, typeConverter);

    RewritePatternSet patterns(context);

#define INSERT_UNARY_FPONLY_PATTERN(AtenOp, TosaOp)                            \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenUnaryFPOnlyOp<AtenOp, TosaOp>>(typeConverter,        \
                                                         context);
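    // Each helper macro marks the Torch op illegal on the conversion target
    // and registers the matching TOSA pattern; e.g. the line below expands to
    // target.addIllegalOp<AtenLogOp>() plus
    // patterns.add<ConvertAtenUnaryFPOnlyOp<AtenLogOp, tosa::LogOp>>(
    //     typeConverter, context).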
|
|
|
|
INSERT_UNARY_FPONLY_PATTERN(AtenLogOp, tosa::LogOp)
|
|
|
|
INSERT_UNARY_FPONLY_PATTERN(AtenExpOp, tosa::ExpOp)
|
|
|
|
#undef INSERT_UNARY_FPONLY_PATTERN
|
|
|
|
|
|
|
|
#define INSERT_UNARY_PATTERN(AtenOp, TosaOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
|
|
|
patterns.add<ConvertAtenUnaryOp<AtenOp, TosaOp>>(typeConverter, context);
|
|
|
|
INSERT_UNARY_PATTERN(AtenNegOp, tosa::NegateOp)
|
|
|
|
INSERT_UNARY_PATTERN(AtenFloorOp, tosa::FloorOp)
|
2021-12-15 02:03:58 +08:00
|
|
|
INSERT_UNARY_PATTERN(AtenRsqrtOp, tosa::RsqrtOp)
|
2021-11-12 08:15:58 +08:00
|
|
|
INSERT_UNARY_PATTERN(AtenBitwiseNotOp, tosa::BitwiseNotOp)
|
2022-01-21 02:58:30 +08:00
|
|
|
INSERT_UNARY_PATTERN(AtenCeilOp, tosa::CeilOp)
|
|
|
|
INSERT_UNARY_PATTERN(AtenReciprocalOp, tosa::ReciprocalOp)
|
2021-11-12 08:15:58 +08:00
|
|
|
#undef INSERT_UNARY_PATTERN
|
|
|
|
|
2022-01-07 00:31:29 +08:00
|
|
|
#define INSERT_BINARY_PATTERN(AtenOp, TosaOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
2021-12-16 03:19:25 +08:00
|
|
|
patterns.add<ConvertAtenBinaryOp<AtenOp, TosaOp>>(typeConverter, context);
|
|
|
|
INSERT_BINARY_PATTERN(AtenMaximumOp, tosa::MaximumOp)
|
|
|
|
INSERT_BINARY_PATTERN(AtenMinimumOp, tosa::MinimumOp)
|
|
|
|
#undef INSERT_BINARY_PATTERN
|
|
|
|
|
2021-11-12 08:15:58 +08:00
|
|
|
#define INSERT_BINARY_ADDSUB_PATTERN(AtenOp, TosaOp) \
|
|
|
|
target.addIllegalOp<AtenOp>(); \
|
|
|
|
patterns.add<ConvertAtenAddSubOp<AtenOp, TosaOp>>(typeConverter, context);
|
|
|
|
INSERT_BINARY_ADDSUB_PATTERN(AtenAddTensorOp, tosa::AddOp)
|
2022-01-21 02:58:30 +08:00
|
|
|
INSERT_BINARY_ADDSUB_PATTERN(AtenAddScalarOp, tosa::AddOp)
|
2021-11-12 08:15:58 +08:00
|
|
|
INSERT_BINARY_ADDSUB_PATTERN(AtenSubTensorOp, tosa::SubOp)
|
2022-01-21 02:58:30 +08:00
|
|
|
INSERT_BINARY_ADDSUB_PATTERN(AtenSubScalarOp, tosa::SubOp)
|
2021-11-12 08:15:58 +08:00
|
|
|
#undef INSERT_BINARY_ADDSUB_PATTERN
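    // TOSA has no lesser or not-equal ops; the shared ConvertAtenCompareOp
    // pattern lowers the Lt variants via tosa.greater with swapped operands
    // and the Ne variants via tosa.equal followed by a logical not.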
#define INSERT_BINARY_COMPARE_PATTERN(AtenOp, TosaOp)                          \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenCompareOp<AtenOp, TosaOp>>(typeConverter, context);
    INSERT_BINARY_COMPARE_PATTERN(AtenGtTensorOp, tosa::GreaterOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenGtScalarOp, tosa::GreaterOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenLtTensorOp, tosa::GreaterOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenLtScalarOp, tosa::GreaterOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenEqTensorOp, tosa::EqualOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenEqScalarOp, tosa::EqualOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenNeTensorOp, tosa::EqualOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenNeScalarOp, tosa::EqualOp)
    INSERT_BINARY_COMPARE_PATTERN(AtenBitwiseAndTensorOp, tosa::BitwiseAndOp)
#undef INSERT_BINARY_COMPARE_PATTERN

#define INSERT_BINARY_MUL_PATTERN(AtenOp)                                      \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenMulOp<AtenOp>>(typeConverter, context);
    INSERT_BINARY_MUL_PATTERN(AtenMulTensorOp);
    INSERT_BINARY_MUL_PATTERN(AtenMulScalarOp);
#undef INSERT_BINARY_MUL_PATTERN

#define INSERT_BINARY_DIV_PATTERN(AtenOp)                                      \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenDivOp<AtenOp>>(typeConverter, context);
    INSERT_BINARY_DIV_PATTERN(AtenDivTensorOp);
    INSERT_BINARY_DIV_PATTERN(AtenDivScalarOp);
#undef INSERT_BINARY_DIV_PATTERN

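    // Reductions are lowered through the shared TOSA legalization helpers from
    // TosaLegalizeCommon, grouped by how the op supplies its reduction dims
    // (a dim list, a single dim, or an implicit all-dims reduction).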
#define INSERT_NDIMS_REDUCTION_OP_PATTERN(AtenOp, ConversionFunc)              \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenMultipleDimsReductionOp<AtenOp, ConversionFunc>>(    \
      typeConverter, context);
    INSERT_NDIMS_REDUCTION_OP_PATTERN(AtenMeanDimOp,
                                      mlir::tosa::convertReduceMeanOp)
    INSERT_NDIMS_REDUCTION_OP_PATTERN(AtenSumDimIntListOp,
                                      mlir::tosa::convertReduceSumOp)
#undef INSERT_NDIMS_REDUCTION_OP_PATTERN

#define INSERT_ONEDIM_REDUCTION_OP_PATTERN(AtenOp, ConversionFunc)             \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenOneDimReductionOp<AtenOp, ConversionFunc>>(          \
      typeConverter, context);
    INSERT_ONEDIM_REDUCTION_OP_PATTERN(AtenAnyDimOp,
                                       mlir::tosa::convertReduceAnyOp)
#undef INSERT_ONEDIM_REDUCTION_OP_PATTERN

#define INSERT_ALLDIMS_REDUCTION_OP_PATTERN(AtenOp, ConversionFunc)            \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenAllDimsReductionOp<AtenOp, ConversionFunc>>(         \
      typeConverter, context);
    INSERT_ALLDIMS_REDUCTION_OP_PATTERN(AtenAllOp,
                                        mlir::tosa::convertReduceAllOp)
    INSERT_ALLDIMS_REDUCTION_OP_PATTERN(AtenAnyOp,
                                        mlir::tosa::convertReduceAnyOp)
    INSERT_ALLDIMS_REDUCTION_OP_PATTERN(AtenSumOp,
                                        mlir::tosa::convertReduceSumOp)
#undef INSERT_ALLDIMS_REDUCTION_OP_PATTERN

#define INSERT_SQUEEZE_OP_PATTERN(AtenOp, TemplateForm)                        \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<TemplateForm<AtenOp>>(typeConverter, context);
    INSERT_SQUEEZE_OP_PATTERN(AtenSqueezeOp, ConvertAtenSqueezeAllDimsOp)
    INSERT_SQUEEZE_OP_PATTERN(AtenSqueezeDimOp, ConvertAtenSqueezeOneDimOp)
#undef INSERT_SQUEEZE_OP_PATTERN

#define INSERT_MATMUL_ATENOP_PATTERN(AtenOp)                                   \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenMatMulOp<AtenOp>>(typeConverter, context);
    INSERT_MATMUL_ATENOP_PATTERN(AtenMatmulOp);
#undef INSERT_MATMUL_ATENOP_PATTERN

#define INSERT_MM_ATENOP_PATTERN(AtenOp)                                       \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenMmOp<AtenOp>>(typeConverter, context);
    INSERT_MM_ATENOP_PATTERN(AtenMmOp);
    INSERT_MM_ATENOP_PATTERN(AtenBmmOp);
#undef INSERT_MM_ATENOP_PATTERN

#define INSERT_LINEAR_ATENOP_PATTERN(AtenOp)                                   \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenLinearOp<AtenOp>>(typeConverter, context);
    INSERT_LINEAR_ATENOP_PATTERN(AtenLinearOp);
#undef INSERT_LINEAR_ATENOP_PATTERN

#define INSERT_ADAPTIVE_POOLING_ATENOP_PATTERN(AtenOp, TosaOpT)                \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenAdaptivePoolingOp<AtenOp, TosaOpT>>(typeConverter,   \
                                                              context);
    INSERT_ADAPTIVE_POOLING_ATENOP_PATTERN(AtenAdaptiveAvgPool2dOp,
                                           tosa::AvgPool2dOp);
#undef INSERT_ADAPTIVE_POOLING_ATENOP_PATTERN

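    // Max/avg pooling use dedicated, non-templated patterns.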
    target.addIllegalOp<AtenMaxPool2dOp>();
    patterns.add<ConvertAtenMaxPool2dOp>(typeConverter, context);

    target.addIllegalOp<AtenAvgPool2dOp>();
    patterns.add<ConvertAtenAvgPool2dOp>(typeConverter, context);

#define INSERT_CONSTANT_FILL_PATTERN(AtenOp, fillVal)                          \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenConstPatternOp<AtenOp, fillVal>>(typeConverter,      \
                                                           context);
    INSERT_CONSTANT_FILL_PATTERN(AtenOnesOp, 1);
    INSERT_CONSTANT_FILL_PATTERN(AtenZerosOp, 0);
#undef INSERT_CONSTANT_FILL_PATTERN

#define INSERT_FILL_SCALAR_PATTERN(AtenOp)                                     \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenFillScalarOp<AtenOp>>(typeConverter, context);
    INSERT_FILL_SCALAR_PATTERN(AtenFill_ScalarOp);
#undef INSERT_FILL_SCALAR_PATTERN

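    // Remaining ops each get an op-specific ConvertAtenOp<Op> specialization.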
#define INSERT_ATENOP_PATTERN(AtenOp)                                          \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenOp<AtenOp>>(typeConverter, context);
    INSERT_ATENOP_PATTERN(AtenTanhOp);
    INSERT_ATENOP_PATTERN(AtenSigmoidOp);
    INSERT_ATENOP_PATTERN(AtenReluOp);
    INSERT_ATENOP_PATTERN(AtenArgmaxOp);
    INSERT_ATENOP_PATTERN(AtenPowTensorScalarOp);
    INSERT_ATENOP_PATTERN(AtenRsubScalarOp);
    INSERT_ATENOP_PATTERN(AtenConvolutionOp);
    INSERT_ATENOP_PATTERN(ValueTensorLiteralOp);
    INSERT_ATENOP_PATTERN(AtenReshapeOp);
    INSERT_ATENOP_PATTERN(AtenBatchNormOp);
    INSERT_ATENOP_PATTERN(AtenNativeLayerNormOp);
    INSERT_ATENOP_PATTERN(AtenFlattenUsingIntsOp);
    INSERT_ATENOP_PATTERN(AtenPermuteOp);
    INSERT_ATENOP_PATTERN(AtenLog2Op);
    INSERT_ATENOP_PATTERN(AtenThresholdOp);
    INSERT_ATENOP_PATTERN(AtenUnsqueezeOp);
    INSERT_ATENOP_PATTERN(AtenContiguousOp);
    INSERT_ATENOP_PATTERN(AtenDropoutOp);
    INSERT_ATENOP_PATTERN(AtenViewOp);
    INSERT_ATENOP_PATTERN(AtenGeluOp);
    INSERT_ATENOP_PATTERN(AtenGeluBackwardOp);
    INSERT_ATENOP_PATTERN(AtenEmbeddingOp);
    INSERT_ATENOP_PATTERN(AtenTransposeIntOp);
    INSERT_ATENOP_PATTERN(AtenMaxDimOp);
    INSERT_ATENOP_PATTERN(AtenSliceTensorOp);
    INSERT_ATENOP_PATTERN(AtenBroadcastToOp);
    INSERT_ATENOP_PATTERN(AtenWhereSelfOp);
    INSERT_ATENOP_PATTERN(AtenClampOp);
    INSERT_ATENOP_PATTERN(AtenArangeStartStepOp);
    INSERT_ATENOP_PATTERN(PrimNumToTensorScalarOp);
    INSERT_ATENOP_PATTERN(AtenCopyOp);
    INSERT_ATENOP_PATTERN(AtenToDtypeOp);
#undef INSERT_ATENOP_PATTERN

#define INSERT_CLONE_ATENOP_PATTERN(AtenOp)                                    \
  target.addIllegalOp<AtenOp>();                                               \
  patterns.add<ConvertAtenCloneOp<AtenOp>>(typeConverter, context);
    INSERT_CLONE_ATENOP_PATTERN(AtenCloneOp);
#undef INSERT_CLONE_ATENOP_PATTERN

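    // Partial conversion: only ops marked illegal above must be rewritten; any
    // other ops in the module are left untouched.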
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      return signalPassFailure();
  }
};
} // namespace

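// Factory for the pass. An illustrative use (assuming a function-level pass
// pipeline is being assembled) would be:
//   pm.addNestedPass<func::FuncOp>(createConvertTorchToTosaPass());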
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::torch::createConvertTorchToTosaPass() {
  return std::make_unique<ConvertTorchToTosa>();
}