//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "torch-mlir/Conversion/TorchToLinalg/TorchToLinalg.h"

#include "../PassDetail.h"
#include "PopulatePatterns.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Matchers.h"
#include "torch-mlir/Conversion/TorchToLinalg/Utils.h"
#include "torch-mlir/Conversion/Utils/Utils.h"
#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "torch-mlir/Dialect/Torch/Utils/TorchUpstream.h"
#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
#include "llvm/ADT/APSInt.h"
#include <numeric>
#include <type_traits>

using namespace mlir;
using namespace mlir::torch;
using namespace mlir::torch::Torch;

// Check if a ranked-tensor has the specified element type.
template <typename elementType> static bool hasElementType(Value tensor) {
  auto tensorType = cast<RankedTensorType>(tensor.getType());
  Type tensorElementType = tensorType.getElementType();
  return isa<elementType>(tensorElementType);
}
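
// Dispatch a comparison on `type`: `fpred` for floats, `iupred` for unsigned
// (and i1) integers, and `ispred` for signed integers.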
template <arith::CmpFPredicate fpred, arith::CmpIPredicate iupred,
          arith::CmpIPredicate ispred>
static Value createComparisonTemplate(OpBuilder &b, Location loc, Type type,
                                      Value lhs, Value rhs) {
  if (isa<mlir::FloatType>(type))
    return b.create<arith::CmpFOp>(loc, fpred, lhs, rhs);
  if (IntegerType intType = dyn_cast<mlir::IntegerType>(type)) {
    if (intType.isUnsigned())
      return b.create<arith::CmpIOp>(loc, iupred, lhs, rhs);
    if (intType.isSigned())
      return b.create<arith::CmpIOp>(loc, ispred, lhs, rhs);
    assert(intType.getWidth() == 1);
    return b.create<arith::CmpIOp>(loc, iupred, lhs, rhs);
  }
  llvm_unreachable("Unhandled element type for comparison");
}
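
// If `value` is produced by an `Aten_MakePerTensorQuantizedTensorOp`, return
// its zero-point operand; otherwise return a null Value.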
static Value getZeroPoint(Value value) {
  if (auto make = value.getDefiningOp<Aten_MakePerTensorQuantizedTensorOp>()) {
    return make.getZeroPoint();
  }
  return nullptr;
}
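
// Thin wrappers around `createComparisonTemplate`, pairing each float
// predicate (ordered, except the unordered not-equal) with the matching
// unsigned and signed integer predicates.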
static Value createGreaterThan(OpBuilder &b, Location loc, Type elementalType,
                               Value lhs, Value rhs) {
  return createComparisonTemplate<arith::CmpFPredicate::OGT,
                                  arith::CmpIPredicate::ugt,
                                  arith::CmpIPredicate::sgt>(
      b, loc, elementalType, lhs, rhs);
}

static Value createGreaterThanOrEqual(OpBuilder &b, Location loc,
                                      Type elementalType, Value lhs,
                                      Value rhs) {
  return createComparisonTemplate<arith::CmpFPredicate::OGE,
                                  arith::CmpIPredicate::uge,
                                  arith::CmpIPredicate::sge>(
      b, loc, elementalType, lhs, rhs);
}

static Value createLessThan(OpBuilder &b, Location loc, Type elementalType,
                            Value lhs, Value rhs) {
  return createComparisonTemplate<arith::CmpFPredicate::OLT,
                                  arith::CmpIPredicate::ult,
                                  arith::CmpIPredicate::slt>(
      b, loc, elementalType, lhs, rhs);
}

static Value createLessThanOrEqual(OpBuilder &b, Location loc,
                                   Type elementalType, Value lhs, Value rhs) {
  return createComparisonTemplate<arith::CmpFPredicate::OLE,
                                  arith::CmpIPredicate::ule,
                                  arith::CmpIPredicate::sle>(
      b, loc, elementalType, lhs, rhs);
}

static Value createEqual(OpBuilder &b, Location loc, Type elementalType,
                         Value lhs, Value rhs) {
  return createComparisonTemplate<arith::CmpFPredicate::OEQ,
                                  arith::CmpIPredicate::eq,
                                  arith::CmpIPredicate::eq>(
      b, loc, elementalType, lhs, rhs);
}

static Value createNotEqual(OpBuilder &b, Location loc, Type elementalType,
                            Value lhs, Value rhs) {
  return createComparisonTemplate<arith::CmpFPredicate::UNE,
                                  arith::CmpIPredicate::ne,
                                  arith::CmpIPredicate::ne>(
      b, loc, elementalType, lhs, rhs);
}
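
// Normal CDF via erf: 0.5 * (1 + erf((x - mean) / sqrt(2))). Note that `sigma`
// does not enter the computation; only the unit-sigma case is used here (see
// buildUnitNormalCdf below).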
static Value buildNormalCdf(OpBuilder &b, Location &loc, Value x, Value mean,
                            Value sigma) {
  Type elementType = x.getType();
  Value xMinusMean = b.create<arith::SubFOp>(loc, x, mean);
  Value two = b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 2));
  Value sqrt2 = b.create<math::SqrtOp>(loc, two);
  Value erfArg = b.create<arith::DivFOp>(loc, xMinusMean, sqrt2);
  Value erf = b.create<math::ErfOp>(loc, erfArg);
  Value one = b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 1));
  Value erfPlus1 = b.create<arith::AddFOp>(loc, one, erf);
  Value oneHalf =
      b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 0.5));
  Value normalCdf = b.create<arith::MulFOp>(loc, oneHalf, erfPlus1);
  return normalCdf;
}

static Value buildUnitNormalCdf(OpBuilder &b, Location &loc, Value x) {
  Type elementType = x.getType();
  Value zero = b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 0));
  Value one = b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 1));
  return buildNormalCdf(b, loc, x, zero, one);
}
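
// Apply a unary floating-point math op with dtype handling: convert the
// payload value to a float compute type (f32 when the converted result type
// is integer), create the op, then convert back to the result element type.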
template <typename MathOpTy>
static Value createFpOpWithDtype(OpBuilder &b, const TypeConverter *converter,
                                 Value payloadArg, Operation *op) {
  Type inTTy = cast<ValueTensorType>(op->getOperand(0).getType()).getDtype();
  Type outTTy = cast<ValueTensorType>(op->getResult(0).getType()).getDtype();
  Type outTy =
      cast<RankedTensorType>(converter->convertType(op->getResult(0).getType()))
          .getElementType();
  Type computeTy = outTy;
  if (isa<IntegerType>(computeTy))
    computeTy = b.getF32Type();
  Location loc = op->getLoc();
  Value arg = convertScalarToDtype(b, loc, payloadArg, computeTy, inTTy);
  auto newOp = b.create<MathOpTy>(loc, arg);
  return convertScalarToDtype(b, loc, newOp, outTy, std::nullopt, outTTy);
}

template <typename OpTy>
static Value createCompareTensorOp(OpBuilder &b, Location loc, OpTy op,
                                   Value lhs, Value rhs) {
  static_assert(std::is_same<OpTy, AtenLtTensorOp>() ||
                    std::is_same<OpTy, AtenLeTensorOp>() ||
                    std::is_same<OpTy, AtenGtTensorOp>() ||
                    std::is_same<OpTy, AtenGeTensorOp>() ||
                    std::is_same<OpTy, AtenEqTensorOp>() ||
                    std::is_same<OpTy, AtenNeTensorOp>(),
                "unimplemented: op type not supported");

  Type lhsDtype = lhs.getType();
  Type rhsDtype = rhs.getType();

  // TODO: Type promotion in case of different `lhsDtype` and `rhsDtype` needs
  // to be handled.
  if (lhsDtype != rhsDtype) {
    op.emitError("unimplemented: lhs and rhs dtype must be same");
    return nullptr;
  }

  Type elementalType = cast<BaseTensorType>(op.getSelf().getType()).getDtype();
  if constexpr (std::is_same<OpTy, AtenLtTensorOp>()) {
    return createLessThan(b, loc, elementalType, lhs, rhs);
  }
  if constexpr (std::is_same<OpTy, AtenLeTensorOp>()) {
    return createLessThanOrEqual(b, loc, elementalType, lhs, rhs);
  }
  if constexpr (std::is_same<OpTy, AtenGtTensorOp>()) {
    return createGreaterThan(b, loc, elementalType, lhs, rhs);
  }
  if constexpr (std::is_same<OpTy, AtenGeTensorOp>()) {
    return createGreaterThanOrEqual(b, loc, elementalType, lhs, rhs);
  }
  if constexpr (std::is_same<OpTy, AtenEqTensorOp>()) {
    return createEqual(b, loc, elementalType, lhs, rhs);
  }
  if constexpr (std::is_same<OpTy, AtenNeTensorOp>()) {
    return createNotEqual(b, loc, elementalType, lhs, rhs);
  }
  llvm_unreachable("unimplemented: op type not supported");
}
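
// Payload for triangular-matrix ops: keep the input element when
// `colIndex <predicate> rowIndex + diagonal` holds, and substitute zero
// otherwise.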
template <arith::CmpIPredicate predicate>
static LogicalResult
createTriangularMatrix(OpBuilder &b, Location loc, ValueRange payloadArgs,
                       Operation *op, ArrayRef<Value> operands, Value &result) {
  auto inputType = cast<RankedTensorType>(operands[0].getType());
  uint64_t inputRank = inputType.getRank();

  // Use the indices of the two innermost dimensions.
  auto rowIndex = b.create<linalg::IndexOp>(loc, inputRank - 2);
  Value rowIndexI64 = castIndexToInt64(b, loc, rowIndex);
  auto colIndex = b.create<linalg::IndexOp>(loc, inputRank - 1);
  Value colIndexI64 = castIndexToInt64(b, loc, colIndex);

  // columnIndex >= rowIndex + diagonal?
  auto sum =
      b.create<arith::AddIOp>(loc, rowIndexI64, /*diagonal=*/operands[1]);
  auto pred = b.create<arith::CmpIOp>(loc, predicate, colIndexI64, sum);

  Value scalar = payloadArgs[0];
  Type elementType = inputType.getElementType();
  Value zero = getConstant(b, loc, 0, elementType);
  result = b.create<arith::SelectOp>(loc, pred, scalar, zero);
  return success();
}
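
// Shared payload for aten.div.Tensor_mode and aten.div.Scalar_mode: compute
// the plain quotient, then apply the optional "trunc" or "floor" rounding
// mode.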
template <typename OpT>
Value createDivModePayload(OpBuilder &b, Location loc,
                           const TypeConverter *converter,
                           ValueRange payloadArgs, OpT op,
                           ArrayRef<Value> operands) {
  static_assert(std::is_same_v<OpT, AtenDivTensorModeOp> ||
                    std::is_same_v<OpT, AtenDivScalarModeOp>,
                "template type must be a tensor/scalar div mode");
  typename OpT::Adaptor adaptor(operands);
  Type dtype = cast<RankedTensorType>(converter->convertType(op.getType()))
                   .getElementType();
  Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
  Value rhs = convertScalarToDtype(
      b, loc,
      std::is_same_v<OpT, AtenDivScalarModeOp> ? operands[1] : payloadArgs[1],
      dtype);

  Value quotient;
  if (isa<mlir::FloatType>(dtype)) {
    quotient = b.create<arith::DivFOp>(loc, lhs, rhs);
  } else if (dtype.isUnsignedInteger()) {
    quotient = b.create<arith::DivUIOp>(loc, lhs, rhs);
  } else {
    assert(dtype.isInteger() &&
           "dtype should be an integer (signless or signed)");
    quotient = b.create<arith::DivSIOp>(loc, lhs, rhs);
  }

  if (isa<Torch::NoneType>(op.getRoundingMode().getType()))
    return quotient;

  std::string roundingMode;
  if (!matchPattern(op.getRoundingMode(), m_TorchConstantStr(roundingMode))) {
    op.emitError("only support constant str rounding mode");
    return nullptr;
  }
  assert((roundingMode == "trunc" || roundingMode == "floor") &&
         "unsupported rounding mode");
  if (roundingMode == "trunc") {
    // "trunc" - rounds the results of the division towards zero. Equivalent
    // to C-style integer division.
    if (!isa<mlir::FloatType>(dtype)) {
      // nothing to do for integers
      return quotient;
    }

    // float
    Value ceil = b.create<math::CeilOp>(loc, quotient);
    Value floor = b.create<math::FloorOp>(loc, quotient);
    Value cstZero = b.create<arith::ConstantOp>(loc, b.getZeroAttr(dtype));
    Value pred = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULT,
                                         quotient, cstZero);
    return b.create<arith::SelectOp>(loc, pred, ceil, floor);
  }
  if (roundingMode == "floor") {
    // "floor" - rounds the results of the division down. Equivalent to
    // floor division in Python (the // operator)
    if (isa<mlir::FloatType>(dtype))
      return b.create<math::FloorOp>(loc, quotient);
    if (!dtype.isUnsignedInteger()) {
      Type defaultIntToFloatType = b.getF64Type();
      lhs = convertScalarToDtype(b, loc, lhs, defaultIntToFloatType);
      rhs = convertScalarToDtype(b, loc, rhs, defaultIntToFloatType);
      quotient = b.create<arith::DivFOp>(loc, lhs, rhs);
      Value floor = b.create<math::FloorOp>(loc, quotient);
      Value convert = convertScalarToDtype(b, loc, floor, dtype);
      return convert;
    }
  }
  return quotient;
}
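
// Build the scalar payload of the generated linalg.generic region for a
// supported elementwise op; on an unsupported op or dtype this emits an error
// and returns a null Value.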
static Value createLinalgPayloadCalculationForElementwiseOp(
    OpBuilder &b, Location loc, const TypeConverter *converter,
    ValueRange payloadArgs, Operation *op, ArrayRef<Value> operands) {
  if (isa<AtenFloorOp>(op))
    return b.create<math::FloorOp>(loc, payloadArgs[0]);
  if (isa<AtenCeilOp>(op))
    return b.create<math::CeilOp>(loc, payloadArgs[0]);
  if (isa<AtenExpOp>(op)) {
    return createFpOpWithDtype<math::ExpOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenExpm1Op>(op)) {
    return createFpOpWithDtype<math::ExpM1Op>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenLogOp>(op)) {
    return createFpOpWithDtype<math::LogOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenLog2Op>(op)) {
    return createFpOpWithDtype<math::Log2Op>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenLog10Op>(op)) {
    return createFpOpWithDtype<math::Log10Op>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenLog1pOp>(op)) {
    return createFpOpWithDtype<math::Log1pOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenErfOp>(op)) {
    return createFpOpWithDtype<math::ErfOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenSqrtOp>(op)) {
    return createFpOpWithDtype<math::SqrtOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenRsqrtOp>(op)) {
    return createFpOpWithDtype<math::RsqrtOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenNegOp>(op)) {
    return createFpOpWithDtype<arith::NegFOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenSinOp>(op)) {
    return createFpOpWithDtype<math::SinOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenSinhOp>(op)) {
    return createFpOpWithDtype<math::SinhOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenAsinOp>(op)) {
    return createFpOpWithDtype<math::AsinOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenAsinhOp>(op)) {
    return createFpOpWithDtype<math::AsinhOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenCosOp>(op)) {
    return createFpOpWithDtype<math::CosOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenCoshOp>(op)) {
    return createFpOpWithDtype<math::CoshOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenAcosOp>(op)) {
    return createFpOpWithDtype<math::AcosOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenAcoshOp>(op)) {
    return createFpOpWithDtype<math::AcoshOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenTanOp>(op)) {
    return createFpOpWithDtype<math::TanOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenTanhOp>(op)) {
    return createFpOpWithDtype<math::TanhOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenAtanOp>(op)) {
    return createFpOpWithDtype<math::AtanOp>(b, converter, payloadArgs[0], op);
  }
  if (isa<AtenAtanhOp>(op)) {
    return createFpOpWithDtype<math::AtanhOp>(b, converter, payloadArgs[0], op);
  }
  if (auto clone = dyn_cast<AtenCloneOp>(op)) {
    int64_t memoryFormat;
    if (!clone.getMemoryFormat().getType().isa<Torch::NoneType>() &&
        (!matchPattern(clone.getMemoryFormat(),
                       m_TorchConstantInt(&memoryFormat)) ||
         (memoryFormat != torch_upstream::MemoryFormat::Contiguous &&
          memoryFormat != torch_upstream::MemoryFormat::ChannelsLast))) {
      clone.emitError("unimplemented: only contiguous and channels last memory "
                      "format is supported");
      return nullptr;
    }
    return payloadArgs[0];
  }
  if (auto bitwiseAndTensor = dyn_cast<AtenBitwiseAndTensorOp>(op)) {
    if (bitwiseAndTensor.getType()
            .cast<ValueTensorType>()
            .getDtype()
            .isa<mlir::FloatType>()) {
      bitwiseAndTensor.emitError(
          "Bitwise_And does not support floating point dtype");
      return nullptr;
    }
    Type dtype = converter->convertType(bitwiseAndTensor.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<arith::AndIOp>(loc, lhs, rhs);
  }
  if (auto bitwiseAndScalar = dyn_cast<AtenBitwiseAndScalarOp>(op)) {
    Type dtype = converter->convertType(bitwiseAndScalar.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    if (!isa<mlir::IntegerType>(dtype)) {
      bitwiseAndScalar.emitError(
          "bitwise_and.Scalar does not support non-integer input dtype.");
      return nullptr;
    }
    Type resultElementType =
        cast<BaseTensorType>(bitwiseAndScalar.getType()).getDtype();
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype,
                                      /*srcOriginalDtype=*/std::nullopt,
                                      /*dstOriginalDtype=*/resultElementType);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype,
                                       /*srcOriginalDtype=*/std::nullopt,
                                       /*dstOriginalDtype=*/resultElementType);
    return b.create<arith::AndIOp>(loc, self, other);
  }
  if (auto bitwiseOrTensor = dyn_cast<AtenBitwiseOrTensorOp>(op)) {
    if (bitwiseOrTensor.getType()
            .cast<ValueTensorType>()
            .getDtype()
            .isa<mlir::FloatType>()) {
      bitwiseOrTensor.emitError(
          "Bitwise_Or does not support floating point dtype");
      return nullptr;
    }
    Type dtype = converter->convertType(bitwiseOrTensor.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<arith::OrIOp>(loc, lhs, rhs);
  }
  if (auto bitwiseXorTensor = dyn_cast<AtenBitwiseXorTensorOp>(op)) {
    if (bitwiseXorTensor.getType()
            .cast<ValueTensorType>()
            .getDtype()
            .isa<mlir::FloatType>()) {
      bitwiseXorTensor.emitError(
          "Bitwise_Xor does not support floating point dtype");
      return nullptr;
    }
    Type dtype = converter->convertType(bitwiseXorTensor.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<arith::XOrIOp>(loc, lhs, rhs);
  }
  if (auto bitwiseRightShiftTensor =
          dyn_cast<AtenBitwiseRightShiftTensorOp>(op)) {
    Type dtype = converter->convertType(bitwiseRightShiftTensor.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    if (!isa<mlir::IntegerType>(dtype)) {
      bitwiseRightShiftTensor.emitError(
          "Bitwise_Right_Shift op does not support non-integer input dtype.");
      return nullptr;
    }
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<arith::ShRSIOp>(loc, lhs, rhs);
  }
  if (auto bitwiseLeftShiftTensor =
          dyn_cast<AtenBitwiseLeftShiftTensorOp>(op)) {
    Type dtype = converter->convertType(bitwiseLeftShiftTensor.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    if (!isa<mlir::IntegerType>(dtype)) {
      bitwiseLeftShiftTensor.emitError(
          "Bitwise_Left_Shift op does not support non-integer input dtype.");
      return nullptr;
    }
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<arith::ShLIOp>(loc, lhs, rhs);
  }
  if (isa<AtenLogicalOrOp, AtenLogicalAndOp, AtenLogicalXorOp>(op)) {
    MLIRContext *context = op->getContext();
    Type floatDtype = mlir::FloatType::getF64(context);
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], floatDtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], floatDtype);
    Value zero =
        b.create<arith::ConstantOp>(loc, b.getFloatAttr(floatDtype, 0));
    Value lhsTest = createNotEqual(b, loc, floatDtype, lhs, zero);
    Value rhsTest = createNotEqual(b, loc, floatDtype, rhs, zero);
    if (isa<AtenLogicalOrOp>(op)) {
      return b.create<arith::OrIOp>(loc, lhsTest, rhsTest);
    }
    if (isa<AtenLogicalAndOp>(op)) {
      return b.create<arith::AndIOp>(loc, lhsTest, rhsTest);
    }
    if (isa<AtenLogicalXorOp>(op)) {
      return b.create<arith::XOrIOp>(loc, lhsTest, rhsTest);
    }
    llvm_unreachable("Unknown op type");
  }
  if (isa<AtenLogicalNotOp>(op)) {
    MLIRContext *context = op->getContext();
    Type floatDtype = mlir::FloatType::getF64(context);
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], floatDtype);
    Value zero =
        b.create<arith::ConstantOp>(loc, b.getFloatAttr(floatDtype, 0));
    return createEqual(b, loc, floatDtype, self, zero);
  }
  if (isa<AtenAbsOp>(op)) {
    if (payloadArgs[0].getType().isa<IntegerType>())
      return b.create<math::AbsIOp>(loc, payloadArgs[0]);
    return b.create<math::AbsFOp>(loc, payloadArgs[0]);
  }
  if (isa<AtenIsinfOp>(op)) {
    Value abs = b.create<math::AbsFOp>(loc, payloadArgs[0]);
    Value infinity = b.create<arith::ConstantOp>(
        loc,
        b.getFloatAttr(abs.getType(), std::numeric_limits<double>::infinity()));
    return createEqual(b, loc, abs.getType(), abs, infinity);
  }
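  // sigmoid(x) = 1 / (1 + exp(-x)), computed in a floating-point type (f32
  // when the result dtype is integer) and converted back to the result dtype.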
  if (isa<AtenSigmoidOp>(op)) {
    Type inTTy = cast<ValueTensorType>(op->getOperand(0).getType()).getDtype();
    Type outTTy = cast<ValueTensorType>(op->getResult(0).getType()).getDtype();
    Type outTy = cast<RankedTensorType>(
                     converter->convertType(op->getResult(0).getType()))
                     .getElementType();
    Type computeTy = outTy;
    if (isa<IntegerType>(computeTy))
      computeTy = b.getF32Type();

    Value arg = payloadArgs[0];
    arg = convertScalarToDtype(b, loc, payloadArgs[0], computeTy, inTTy);
    auto negate = b.create<arith::NegFOp>(loc, arg);
    auto one =
        b.create<arith::ConstantOp>(loc, FloatAttr::get(negate.getType(), 1));
    auto exp = b.create<math::ExpOp>(loc, negate);
    auto added = b.create<arith::AddFOp>(loc, exp, one);
    auto div = b.create<arith::DivFOp>(loc, one, added);
    return convertScalarToDtype(b, loc, div, outTy, std::nullopt, outTTy);
  }
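  // Relu lowers to max(arg, zero point): quantized inputs compare against a
  // (clamped) zero point, everything else against a plain zero constant.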
  if (auto relu = dyn_cast<AtenReluOp>(op)) {
    Value zeroPoint = getZeroPoint(relu.getSelf());
    Value arg = payloadArgs[0];
    auto intType = dyn_cast<mlir::IntegerType>(arg.getType());
    if (zeroPoint && !intType) {
      relu.emitError("unimplemented: non-integer quantized Relu.");
      return nullptr;
    }
    auto reluTorchType = cast<ValueTensorType>(relu.getType());
    bool isUnsigned =
        torch_to_linalg::isUnsignedTorchType(reluTorchType.getDtype());
    if (zeroPoint) {
      int64_t zeroPointInt;
      int64_t width = intType.getWidth();
      assert(width < 64);
      int64_t minForIntType = isUnsigned ? 0 : -(1 << (width - 1));
      int64_t maxForIntType =
          isUnsigned ? (1 << width) - 1 : (1 << (width - 1)) - 1;
      // check for constant zero point edge-cases:
      if (matchPattern(zeroPoint, m_TorchConstantInt(&zeroPointInt))) {
        if (zeroPointInt > maxForIntType) {
          // TODO: figure out how to handle this case:
          // current impl. quantizes output like input.
          // If zero point > maxForIntType, ordinary relu should return 0.
          // However, 0 isn't represented in such a quantization scheme.
          relu.emitError(
              "unimplemented: quantized relu for zero-point > max qint");
          return nullptr;
        }
        if (zeroPointInt < minForIntType)
          return arg;
      }
      zeroPoint = converter->materializeTargetConversion(
          b, loc, converter->convertType(zeroPoint.getType()), zeroPoint);
      auto minForIntTypeValue = b.create<arith::ConstantOp>(
          loc, b.getIntegerAttr(zeroPoint.getType(), minForIntType));
      auto maxForIntTypeValue = b.create<arith::ConstantOp>(
          loc, b.getIntegerAttr(zeroPoint.getType(), maxForIntType));
      auto zpLtMax = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
                                             zeroPoint, maxForIntTypeValue);
      b.create<cf::AssertOp>(
          loc, zpLtMax,
          b.getStringAttr("Invalid Quantization: quantized relu with "
                          "zero-point > max qint"));
      auto zpLtMin = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
                                             zeroPoint, minForIntTypeValue);
      zeroPoint = b.create<arith::SelectOp>(loc, zpLtMin, minForIntTypeValue,
                                            zeroPoint);
      zeroPoint = b.create<arith::TruncIOp>(loc, arg.getType(), zeroPoint);
    } else {
      zeroPoint =
          b.create<arith::ConstantOp>(loc, b.getZeroAttr(arg.getType()));
    }
    Value cmp;
    if (intType) {
      auto pred =
          isUnsigned ? arith::CmpIPredicate::ugt : arith::CmpIPredicate::sgt;
      cmp = b.create<arith::CmpIOp>(loc, pred, arg, zeroPoint);
    } else {
      cmp = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT, arg,
                                    zeroPoint);
    }
    return b.create<arith::SelectOp>(loc, cmp, arg, zeroPoint);
  }
  if (auto round = dyn_cast<AtenRoundOp>(op)) {
    if (!round.getType()
             .cast<ValueTensorType>()
             .getDtype()
             .isa<mlir::FloatType>()) {
      round.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    return b.create<math::RoundEvenOp>(loc, payloadArgs[0]);
  }
  if (auto prelu = dyn_cast<AtenPreluOp>(op)) {
    if (!prelu.getType()
             .cast<ValueTensorType>()
             .getDtype()
             .isa<mlir::FloatType>()) {
      prelu.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Type elementType = payloadArgs[0].getType();
    Value constZero =
        b.create<arith::ConstantOp>(loc, b.getZeroAttr(elementType));
    Value pred = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT,
                                         payloadArgs[0], constZero);
    Value positivePart =
        b.create<arith::SelectOp>(loc, pred, payloadArgs[0], constZero);
    Value negativePart =
        b.create<arith::SelectOp>(loc, pred, constZero, payloadArgs[0]);
    Value scale = convertScalarToDtype(b, loc, payloadArgs[1], elementType);
    Value scaledNegativePart =
        b.create<arith::MulFOp>(loc, negativePart, scale);
    return b.create<arith::AddFOp>(loc, positivePart, scaledNegativePart);
  }
  if (auto gelu = dyn_cast<AtenGeluOp>(op)) {
    if (!gelu.getType()
             .cast<ValueTensorType>()
             .getDtype()
             .isa<mlir::FloatType>()) {
      gelu.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    // TODO: Take approximation into account.
    std::string approximate;
    if (!matchPattern(gelu.getApproximate(), m_TorchConstantStr(approximate))) {
      gelu.emitError(
          "unimplemented: expected approximate to be a constant str");
      return nullptr;
    }
    if (approximate == "none") {
      Value multiplier = buildUnitNormalCdf(b, loc, payloadArgs[0]);
      return b.create<arith::MulFOp>(loc, payloadArgs[0], multiplier);
    }
    if (approximate == "tanh") {
      // GELU(x)=0.5∗x∗(1+Tanh((2/π)^1/2 * (x+0.044715∗x^3)))
      // Ref: https://pytorch.org/docs/stable/generated/torch.nn.GELU.html
      Value cstThree = b.create<arith::ConstantOp>(
          loc, IntegerAttr::get(IntegerType::get(op->getContext(), 64), 3));
      Value xCube = b.create<math::FPowIOp>(loc, payloadArgs[0], cstThree);
      Type elementType = payloadArgs[0].getType();
      Value cstAlpha = b.create<arith::ConstantOp>(
          loc, FloatAttr::get(elementType, 0.044715));
      Value xCubeMulAlpha = b.create<arith::MulFOp>(loc, xCube, cstAlpha);
      Value xPlusXCubeMulAlpha =
          b.create<arith::AddFOp>(loc, payloadArgs[0], xCubeMulAlpha);
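      // 0.7977240352174656 is roughly sqrt(2/pi), the scale factor inside the
      // tanh approximation referenced above.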
      Value cstBeta = b.create<arith::ConstantOp>(
          loc, FloatAttr::get(elementType, 0.7977240352174656));
      Value betaMulX =
          b.create<arith::MulFOp>(loc, cstBeta, xPlusXCubeMulAlpha);
      Value tanh = b.create<math::TanhOp>(loc, betaMulX);
      Value cstOne =
          b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 1.0));
      Value onePlusTanh = b.create<arith::AddFOp>(loc, cstOne, tanh);
      Value cstHalf =
          b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 0.5));
      Value multiplier = b.create<arith::MulFOp>(loc, cstHalf, onePlusTanh);
      return b.create<arith::MulFOp>(loc, payloadArgs[0], multiplier);
    }
    gelu.emitError("unimplemented: approximate value should be none or tanh");
    return nullptr;
  }
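  // Gelu backward: gradOutput * (cdf(x) + x * pdf(x)); kAlphaHalf supplies
  // the 1/sqrt(2*pi) normalization of the unit normal pdf.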
  if (auto geluBackward = dyn_cast<AtenGeluBackwardOp>(op)) {
    if (!geluBackward.getType()
             .cast<ValueTensorType>()
             .getDtype()
             .isa<mlir::FloatType>()) {
      geluBackward.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    // TODO: Take approximation into account.
    std::string approximate;
    if (!matchPattern(geluBackward.getApproximate(),
                      m_TorchConstantStr(approximate)) ||
        approximate != "none")
      return nullptr;
    Type elementType = payloadArgs[1].getType();
    Value cstAlpha0 = b.create<arith::ConstantOp>(
        loc, FloatAttr::get(elementType, 1.12837916709551257390));
    Value cstAlpha1 = b.create<arith::ConstantOp>(
        loc, FloatAttr::get(elementType, 0.70710678118654752440));
    Value oneHalf =
        b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 0.5));
    Value kAlpha = b.create<arith::MulFOp>(loc, cstAlpha0, cstAlpha1);
    Value kAlphaHalf = b.create<arith::MulFOp>(loc, kAlpha, oneHalf);
    Value negOneHalf =
        b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, -0.5));
    Value inputSquared =
        b.create<arith::MulFOp>(loc, payloadArgs[1], payloadArgs[1]);
    Value negHalfInputSquared =
        b.create<arith::MulFOp>(loc, inputSquared, negOneHalf);
    Value dinput = b.create<math::ExpOp>(loc, negHalfInputSquared);
    Value cdf = buildUnitNormalCdf(b, loc, payloadArgs[1]);
    Value dinputInput = b.create<arith::MulFOp>(loc, dinput, payloadArgs[1]);
    Value dinputInputAlpha =
        b.create<arith::MulFOp>(loc, dinputInput, kAlphaHalf);
    Value cdfExt = b.create<arith::AddFOp>(loc, dinputInputAlpha, cdf);
    return b.create<arith::MulFOp>(loc, payloadArgs[0], cdfExt);
  }
  if (auto hardtanhBackward = dyn_cast<AtenHardtanhBackwardOp>(op)) {
    AtenHardtanhBackwardOp::Adaptor adaptor(operands);
    if (!hardtanhBackward.getType()
             .cast<ValueTensorType>()
             .getDtype()
             .isa<mlir::FloatType>()) {
      hardtanhBackward.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Value gradOutput = payloadArgs[0];
    Type elementType = gradOutput.getType();
    Value self = convertScalarToDtype(b, loc, payloadArgs[1], elementType);
    Value constantZero =
        b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 0.0));
    Value min = convertScalarToDtype(b, loc, adaptor.getMinVal(), elementType);
    Value max = convertScalarToDtype(b, loc, adaptor.getMaxVal(), elementType);
    Value lesser =
        b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULT, self, min);
    Value greater =
        b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT, self, max);
    Value cmp = b.create<arith::OrIOp>(loc, lesser, greater);
    return b.create<arith::SelectOp>(loc, cmp, constantZero, gradOutput);
  }
  if (auto add = dyn_cast<AtenAddTensorOp>(op)) {
    AtenAddTensorOp::Adaptor adaptor(operands);
    Type resultElementType = cast<BaseTensorType>(add.getType()).getDtype();
    Type dtype = cast<RankedTensorType>(converter->convertType(add.getType()))
                     .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype,
                                     /*srcOriginalDtype=*/std::nullopt,
                                     /*dstOriginalDtype=*/resultElementType);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype,
                                     /*srcOriginalDtype=*/std::nullopt,
                                     /*dstOriginalDtype=*/resultElementType);
    Value alpha = convertScalarToDtype(b, loc, adaptor.getAlpha(), dtype,
                                       /*srcOriginalDtype=*/std::nullopt,
                                       /*dstOriginalDtype=*/resultElementType);
    if (isa<mlir::FloatType>(dtype)) {
      Value scaled = b.create<arith::MulFOp>(loc, rhs, alpha);
      return b.create<arith::AddFOp>(loc, lhs, scaled);
    } else {
      Value scaled = b.create<arith::MulIOp>(loc, rhs, alpha);
      return b.create<arith::AddIOp>(loc, lhs, scaled);
    }
  }
  if (auto sub = dyn_cast<AtenSubTensorOp>(op)) {
    AtenSubTensorOp::Adaptor adaptor(operands);
    Type dtype = cast<RankedTensorType>(converter->convertType(sub.getType()))
                     .getElementType();
    Type resultElementType = cast<BaseTensorType>(sub.getType()).getDtype();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype,
                                     /*srcOriginalDtype=*/std::nullopt,
                                     /*dstOriginalDtype=*/resultElementType);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype,
                                     /*srcOriginalDtype=*/std::nullopt,
                                     /*dstOriginalDtype=*/resultElementType);
    Value alpha = convertScalarToDtype(b, loc, adaptor.getAlpha(), dtype,
                                       /*srcOriginalDtype=*/std::nullopt,
                                       /*dstOriginalDtype=*/resultElementType,
                                       /*originalScalar=*/sub.getAlpha());
    if (isa<mlir::FloatType>(dtype)) {
      Value scaled = b.create<arith::MulFOp>(loc, rhs, alpha);
      return b.create<arith::SubFOp>(loc, lhs, scaled);
    } else {
      Value scaled = b.create<arith::MulIOp>(loc, rhs, alpha);
      return b.create<arith::SubIOp>(loc, lhs, scaled);
    }
  }
  if (auto subScalar = dyn_cast<AtenSubScalarOp>(op)) {
    Type dtype =
        cast<RankedTensorType>(converter->convertType(subScalar.getType()))
            .getElementType();
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
    Value alpha = convertScalarToDtype(
        b, loc, operands[2], dtype, /*srcOriginalDtype=*/operands[2].getType(),
        /*dstOriginalDtype=*/dtype);
    if (isa<mlir::FloatType>(dtype)) {
      Value mult = b.create<arith::MulFOp>(loc, other, alpha);
      return b.create<arith::SubFOp>(loc, self, mult);
    } else if (isa<mlir::IntegerType>(dtype)) {
      Value mult = b.create<arith::MulIOp>(loc, other, alpha);
      return b.create<arith::SubIOp>(loc, self, mult);
    }
    subScalar.emitError("unimplemented: dtype other than float and integer "
                        "types are not supported.");
    return nullptr;
  }
  if (auto addScalar = dyn_cast<AtenAddScalarOp>(op)) {
    Type dtype =
        cast<RankedTensorType>(converter->convertType(addScalar.getType()))
            .getElementType();
    Type resultElementType =
        cast<BaseTensorType>(addScalar.getType()).getDtype();
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype,
                                      /*srcOriginalDtype=*/std::nullopt,
                                      /*dstOriginalDtype=*/resultElementType);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype,
                                       /*srcOriginalDtype=*/std::nullopt,
                                       /*dstOriginalDtype=*/resultElementType);
    Value alpha = convertScalarToDtype(b, loc, operands[2], dtype,
                                       /*srcOriginalDtype=*/std::nullopt,
                                       /*dstOriginalDtype=*/resultElementType);
    if (isa<mlir::FloatType>(dtype)) {
      Value mult = b.create<arith::MulFOp>(loc, other, alpha);
      return b.create<arith::AddFOp>(loc, self, mult);
    } else if (isa<mlir::IntegerType>(dtype)) {
      Value mult = b.create<arith::MulIOp>(loc, other, alpha);
      return b.create<arith::AddIOp>(loc, self, mult);
    }
    addScalar.emitError("unimplemented: dtype other than float and integer "
                        "types are not supported.");
    return nullptr;
  }
  if (auto mul = dyn_cast<AtenMulTensorOp>(op)) {
    AtenMulTensorOp::Adaptor adaptor(operands);
    Type dtype = cast<RankedTensorType>(converter->convertType(mul.getType()))
                     .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    if (isa<mlir::FloatType>(dtype)) {
      return b.create<arith::MulFOp>(loc, lhs, rhs);
    } else if (isa<mlir::ComplexType>(dtype)) {
      return b.create<complex::MulOp>(loc, lhs, rhs);
    } else {
      return b.create<arith::MulIOp>(loc, lhs, rhs);
    }
  }
  if (auto atan2 = dyn_cast<AtenAtan2Op>(op)) {
    Type dtype = cast<RankedTensorType>(converter->convertType(atan2.getType()))
                     .getElementType();
    if (!isa<mlir::FloatType>(dtype)) {
      atan2.emitError("Atan2 requires floating point result type");
      return nullptr;
    }
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<math::Atan2Op>(loc, lhs, rhs);
  }
  if (auto ltTensor = dyn_cast<AtenLtTensorOp>(op)) {
    return createCompareTensorOp(b, loc, ltTensor, payloadArgs[0],
                                 payloadArgs[1]);
  }
  if (auto leTensor = dyn_cast<AtenLeTensorOp>(op)) {
    return createCompareTensorOp(b, loc, leTensor, payloadArgs[0],
                                 payloadArgs[1]);
  }
  if (auto gtTensor = dyn_cast<AtenGtTensorOp>(op)) {
    return createCompareTensorOp(b, loc, gtTensor, payloadArgs[0],
                                 payloadArgs[1]);
  }
  if (auto geTensor = dyn_cast<AtenGeTensorOp>(op)) {
    return createCompareTensorOp(b, loc, geTensor, payloadArgs[0],
                                 payloadArgs[1]);
  }
  if (auto eqTensor = dyn_cast<AtenEqTensorOp>(op)) {
    return createCompareTensorOp(b, loc, eqTensor, payloadArgs[0],
                                 payloadArgs[1]);
  }
  if (auto neTensor = dyn_cast<AtenNeTensorOp>(op)) {
    return createCompareTensorOp(b, loc, neTensor, payloadArgs[0],
                                 payloadArgs[1]);
  }
  if (auto div = dyn_cast<AtenDivTensorOp>(op)) {
    AtenDivTensorOp::Adaptor adaptor(operands);
    Type dtype = cast<RankedTensorType>(converter->convertType(div.getType()))
                     .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    if (isa<mlir::FloatType>(dtype))
      return b.create<arith::DivFOp>(loc, lhs, rhs);
    else if (isa<mlir::IntegerType>(dtype)) {
      if (dtype.isUnsignedInteger())
        return b.create<arith::DivUIOp>(loc, lhs, rhs);
      return b.create<arith::DivSIOp>(loc, lhs, rhs);
    }
    div.emitError("unimplemented: non-floating point and non-integer dtype");
    return nullptr;
  }
  if (auto divScalarMode = dyn_cast<AtenDivScalarModeOp>(op)) {
    return createDivModePayload(b, loc, converter, payloadArgs, divScalarMode,
                                operands);
  }
  if (auto divTensorMode = dyn_cast<AtenDivTensorModeOp>(op)) {
    return createDivModePayload(b, loc, converter, payloadArgs, divTensorMode,
                                operands);
  }
  if (auto pow = dyn_cast<AtenPowScalarOp>(op)) {
    Type dtype = cast<ValueTensorType>(pow.getType()).getDtype();
    if (!isa<mlir::FloatType>(dtype)) {
      pow.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Value selfPromoted = convertScalarToDtype(b, loc, operands[0], dtype);
    Value expPromoted = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    return b.create<math::PowFOp>(loc, selfPromoted, expPromoted);
  }

  if (auto pow = dyn_cast<AtenPowTensorScalarOp>(op)) {
    if (!pow.getType()
             .cast<ValueTensorType>()
             .getDtype()
             .isa<mlir::FloatType>()) {
      pow.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Type dtype = cast<ValueTensorType>(pow.getSelf().getType()).getDtype();
    Value expPromoted = convertScalarToDtype(b, loc, operands[1], dtype);
    return b.create<math::PowFOp>(loc, payloadArgs[0], expPromoted);
  }

  if (auto pow = dyn_cast<AtenPowTensorTensorOp>(op)) {
    Type dtype = cast<RankedTensorType>(converter->convertType(pow.getType()))
                     .getElementType();
    if (!isa<mlir::FloatType>(dtype)) {
      pow.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    return b.create<math::PowFOp>(loc, lhs, rhs);
  }

  if (auto imag = dyn_cast<AtenImagOp>(op)) {
    Type dtype = cast<RankedTensorType>(converter->convertType(imag.getType()))
                     .getElementType();
    if (!isa<mlir::FloatType>(dtype)) {
      imag.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Value imagVal = b.create<complex::ImOp>(loc, payloadArgs[0]);
    return imagVal;
  }

  if (auto real = dyn_cast<AtenRealOp>(op)) {
    Type dtype = cast<RankedTensorType>(converter->convertType(real.getType()))
                     .getElementType();
    if (!isa<mlir::FloatType>(dtype)) {
      real.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Value realVal = b.create<complex::ReOp>(loc, payloadArgs[0]);
    return realVal;
  }
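
  // The scalar comparison ops below promote the scalar operand to the
  // tensor's element type, then select a float or signed/unsigned integer
  // predicate based on that dtype.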
  if (auto gtScalar = dyn_cast<AtenGtScalarOp>(op)) {
    Type dtype = cast<BaseTensorType>(gtScalar.getSelf().getType()).getDtype();

    // TODO: `gtTensor` and `gtScalar` share similar code and can be called from
    // one static function.
    Value otherPromoted =
        convertScalarToDtype(b, loc, operands[1], payloadArgs[0].getType());

    if (isa<mlir::FloatType>(dtype))
      return b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT,
                                     payloadArgs[0], otherPromoted);
    if (IntegerType intType = dyn_cast<mlir::IntegerType>(dtype)) {
      if (!operands[1].getType().isa<mlir::IntegerType>()) {
        // TODO: Promote tensor args from integer to float.
        gtScalar.emitError(
            "unimplemented: type promotion from tensor to scalar.");
        return nullptr;
      }

      if (intType.isUnsigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ugt,
                                       payloadArgs[0], otherPromoted);
      if (intType.isSigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt,
                                       payloadArgs[0], otherPromoted);
    }
    gtScalar.emitError("unimplemented: dtype isn't supported.");
    return nullptr;
  }

  if (auto geScalar = dyn_cast<AtenGeScalarOp>(op)) {
    Type dtype = cast<BaseTensorType>(geScalar.getSelf().getType()).getDtype();

    // TODO: The `AtenGeScalarOp` and `AtenGtScalarOp` share a lot of code that
    // can be refactored.
    Value otherPromoted =
        convertScalarToDtype(b, loc, operands[1], payloadArgs[0].getType());

    if (isa<mlir::FloatType>(dtype))
      return b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGE,
                                     payloadArgs[0], otherPromoted);
    if (IntegerType intType = dyn_cast<mlir::IntegerType>(dtype)) {
      if (!operands[1].getType().isa<mlir::IntegerType>()) {
        // TODO: Promote tensor args from integer to float.
        geScalar.emitError(
            "unimplemented: type promotion from tensor to scalar.");
        return nullptr;
      }

      if (intType.isUnsigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::uge,
                                       payloadArgs[0], otherPromoted);
      if (intType.isSigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sge,
                                       payloadArgs[0], otherPromoted);
    }
    geScalar.emitError("unimplemented: dtype isn't supported.");
    return nullptr;
  }

  if (auto eqScalar = dyn_cast<AtenEqScalarOp>(op)) {
    Type dtype = cast<BaseTensorType>(eqScalar.getSelf().getType()).getDtype();
    Value otherPromoted =
        convertScalarToDtype(b, loc, operands[1], payloadArgs[0].getType());

    if (isa<mlir::IntegerType>(dtype)) {
      if (!operands[1].getType().isa<mlir::IntegerType>()) {
        // TODO: Promote tensor operand from integer to float.
        eqScalar.emitError(
            "unimplemented: type promotion from tensor to scalar");
        return nullptr;
      }
    }
    return createEqual(b, loc, dtype, payloadArgs[0], otherPromoted);
  }

  if (auto neScalar = dyn_cast<AtenNeScalarOp>(op)) {
    Type dtype = cast<BaseTensorType>(neScalar.getSelf().getType()).getDtype();
    Value otherPromoted =
        convertScalarToDtype(b, loc, operands[1], payloadArgs[0].getType());

    if (isa<mlir::IntegerType>(dtype)) {
      if (!operands[1].getType().isa<mlir::IntegerType>()) {
        // TODO: Promote tensor operand from integer to float.
        neScalar.emitError(
            "unimplemented: type promotion from tensor to scalar");
        return nullptr;
      }
    }
    return createNotEqual(b, loc, dtype, payloadArgs[0], otherPromoted);
  }

  if (auto ltScalar = dyn_cast<AtenLtScalarOp>(op)) {
    Type dtype = cast<BaseTensorType>(ltScalar.getSelf().getType()).getDtype();
    Value otherPromoted =
        convertScalarToDtype(b, loc, operands[1], payloadArgs[0].getType());

    // TODO: Both tensor and scalar variants of `aten.gt` and `aten.lt` share
    // a lot of code that can be refactored.
    if (isa<mlir::FloatType>(dtype))
      return b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULT,
                                     payloadArgs[0], otherPromoted);
    if (IntegerType intType = dyn_cast<mlir::IntegerType>(dtype)) {
      if (!operands[1].getType().isa<mlir::IntegerType>()) {
        // TODO: Promote tensor operand from integer to float.
        ltScalar.emitError(
            "unimplemented: type promotion from tensor to scalar");
        return nullptr;
      }
      if (intType.isUnsigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
                                       payloadArgs[0], otherPromoted);
      if (intType.isSigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
                                       payloadArgs[0], otherPromoted);
    }
    ltScalar.emitError("unimplemented: dtype isn't supported.");
    return nullptr;
  }

  if (auto leScalar = dyn_cast<AtenLeScalarOp>(op)) {
    Type dtype = cast<BaseTensorType>(leScalar.getSelf().getType()).getDtype();
    Value otherPromoted =
        convertScalarToDtype(b, loc, operands[1], payloadArgs[0].getType());

    // TODO: The `AtenLeScalarOp` and `AtenLtScalarOp` share a lot of code
    // that can be refactored.
    if (isa<mlir::FloatType>(dtype))
      return b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULE,
                                     payloadArgs[0], otherPromoted);
    if (IntegerType intType = dyn_cast<mlir::IntegerType>(dtype)) {
      if (!operands[1].getType().isa<mlir::IntegerType>()) {
        // TODO: Promote tensor operand from integer to float.
        leScalar.emitError(
            "unimplemented: type promotion from tensor to scalar");
        return nullptr;
      }
      if (intType.isUnsigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ule,
                                       payloadArgs[0], otherPromoted);
      if (intType.isSigned())
        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sle,
                                       payloadArgs[0], otherPromoted);
    }
    leScalar.emitError("unimplemented: dtype isn't supported.");
    return nullptr;
  }

  if (auto whereSelf = dyn_cast<AtenWhereSelfOp>(op)) {
|
2024-04-28 05:00:56 +08:00
|
|
|
|
Type dtype =
|
|
|
|
|
cast<RankedTensorType>(converter->convertType(whereSelf.getType()))
|
|
|
|
|
.getElementType();
|
2022-03-11 01:54:13 +08:00
|
|
|
|
Value lhs = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
|
|
|
|
|
Value rhs = convertScalarToDtype(b, loc, payloadArgs[2], dtype);
|
|
|
|
|
return b.create<arith::SelectOp>(loc, payloadArgs[0], lhs, rhs);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (auto lerp = dyn_cast<AtenLerpTensorOp>(op)) {
|
|
|
|
|
if (!lerp.getType()
|
|
|
|
|
.cast<ValueTensorType>()
|
|
|
|
|
.getDtype()
|
|
|
|
|
.isa<mlir::FloatType>()) {
|
|
|
|
|
lerp.emitError("unimplemented: non-floating point dtype");
|
|
|
|
|
return nullptr;
|
|
|
|
|
}
|
|
|
|
|
AtenLerpTensorOp::Adaptor adaptor(payloadArgs);
|
2022-12-08 04:20:41 +08:00
|
|
|
|
auto start = adaptor.getSelf();
|
|
|
|
|
auto end = adaptor.getEnd();
|
|
|
|
|
auto weight = adaptor.getWeight();
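    // Linear interpolation: start + weight * (end - start).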
    auto delta = b.create<arith::SubFOp>(loc, end, start);
    auto weightedDelta = b.create<arith::MulFOp>(loc, delta, weight);
    return b.create<arith::AddFOp>(loc, start, weightedDelta);
  }
  if (auto minimum = dyn_cast<AtenMinimumOp>(op)) {
    Type dtype = cast<BaseTensorType>(minimum.getType()).getDtype();
    Type elemTy = converter->convertType(minimum.getType())
                      .cast<RankedTensorType>()
                      .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], elemTy);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], elemTy);
    Value pred = createLessThan(b, loc, dtype, lhs, rhs);
    return b.create<arith::SelectOp>(loc, pred, lhs, rhs);
  }
  if (auto maximum = dyn_cast<AtenMaximumOp>(op)) {
    Type dtype = cast<BaseTensorType>(maximum.getType()).getDtype();
    Type elemTy = converter->convertType(maximum.getType())
                      .cast<RankedTensorType>()
                      .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], elemTy);
    Value rhs = convertScalarToDtype(b, loc, payloadArgs[1], elemTy);
    Value pred = createGreaterThan(b, loc, dtype, lhs, rhs);
    return b.create<arith::SelectOp>(loc, pred, lhs, rhs);
  }
  if (auto clamp = dyn_cast<AtenClampOp>(op)) {
    AtenClampOp::Adaptor adaptor(operands);
    auto min = adaptor.getMin();
    auto max = adaptor.getMax();
    if (min.getType().isa<Torch::OptionalType>() ||
        max.getType().isa<Torch::OptionalType>()) {
      clamp.emitError("unimplemented: runtime optional type");
      return nullptr;
    }

    Type dtype =
        cast<RankedTensorType>(converter->convertType(clamp.getType()))
            .getElementType();
    if (!isa<mlir::FloatType, mlir::IntegerType>(dtype)) {
      clamp.emitError("unimplemented type for clamp");
      return nullptr;
    }

    Type dstOriginalDtype = cast<BaseTensorType>(clamp.getType()).getDtype();
    bool isUnsigned = isa<QUInt8Type>(dstOriginalDtype);
    if (auto intTy = dyn_cast<IntegerType>(dstOriginalDtype)) {
      isUnsigned = intTy.isUnsigned();
    }
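    // Clamp is lowered as two compare-and-select steps: first clamp from
    // below against `min`, then from above against `max`.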
    auto cmpSelect = [&](Value input, Value clamp, bool getMax) -> Value {
      clamp = convertScalarToDtype(b, loc, clamp, dtype,
                                   /*srcOriginalDtype=*/std::nullopt,
                                   /*dstOriginalDtype=*/dstOriginalDtype);

      Value pred;
      if (isa<mlir::FloatType>(dtype)) {
        auto cmp =
            getMax ? arith::CmpFPredicate::UGT : arith::CmpFPredicate::ULT;
        pred = b.create<arith::CmpFOp>(loc, cmp, input, clamp);
      } else if (isa<mlir::IntegerType>(dtype)) {
        auto cmp =
            isUnsigned ? arith::CmpIPredicate::ult : arith::CmpIPredicate::slt;
        if (getMax)
          cmp = arith::invertPredicate(cmp);
        pred = b.create<arith::CmpIOp>(loc, cmp, input, clamp);
      }
      return b.create<arith::SelectOp>(loc, pred, clamp, input);
    };

    auto result = payloadArgs[0];
    if (!min.getType().isa<Torch::NoneType>())
      result = cmpSelect(result, min, /*getMax=*/false);
    if (!max.getType().isa<Torch::NoneType>())
      result = cmpSelect(result, max, /*getMax=*/true);
    return result;
  }
  if (auto clampTensor = dyn_cast<AtenClampTensorOp>(op)) {
    AtenClampTensorOp::Adaptor adaptor(operands);
    auto min = adaptor.getMin();
    auto max = adaptor.getMax();
    if (min.getType().isa<Torch::OptionalType>() ||
        max.getType().isa<Torch::OptionalType>()) {
      clampTensor.emitError("unimplemented: runtime optional type");
      return nullptr;
    }
    Type dtype =
        cast<RankedTensorType>(converter->convertType(clampTensor.getType()))
            .getElementType();
    bool isMinNone = true;
    auto result = payloadArgs[0];
    if (!min.getType().isa<Torch::NoneType>()) {
      isMinNone = false;
      auto minPromoted = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
      Value pred;
      if (isa<mlir::FloatType>(dtype)) {
        pred = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULT, result,
                                       minPromoted);
      } else if (isa<mlir::IntegerType>(dtype)) {
        pred = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, result,
                                       minPromoted);
      } else {
        clampTensor.emitError(
            "unimplemented: dtype other than float and integer "
            "types are not supported.");
        return nullptr;
      }
      result = b.create<arith::SelectOp>(loc, pred, minPromoted, result);
    }
    if (!max.getType().isa<Torch::NoneType>()) {
      max = isMinNone ? payloadArgs[1] : payloadArgs[2];
      auto maxPromoted = convertScalarToDtype(b, loc, max, dtype);
      Value pred;
      if (isa<mlir::FloatType>(dtype)) {
        pred = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT, result,
                                       maxPromoted);
      } else if (isa<mlir::IntegerType>(dtype)) {
        pred = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt, result,
                                       maxPromoted);
      } else {
        clampTensor.emitError(
            "unimplemented: dtype other than float and integer "
            "types are not supported.");
        return nullptr;
      }
      result = b.create<arith::SelectOp>(loc, pred, maxPromoted, result);
    }
    return result;
  }
  if (auto rsub = dyn_cast<AtenRsubScalarOp>(op)) {
    Type dtype = cast<RankedTensorType>(converter->convertType(rsub.getType()))
                     .getElementType();
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
    Value alpha = convertScalarToDtype(
        b, loc, operands[2], dtype, /*srcOriginalDtype=*/operands[2].getType(),
        /*dstOriginalDtype=*/dtype);
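    // rsub(self, other, alpha) computes other - self * alpha.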
    if (isa<mlir::FloatType>(dtype)) {
      Value mult = b.create<arith::MulFOp>(loc, self, alpha);
      return b.create<arith::SubFOp>(loc, other, mult);
    } else if (isa<mlir::IntegerType>(dtype)) {
      Value mult = b.create<arith::MulIOp>(loc, self, alpha);
      return b.create<arith::SubIOp>(loc, other, mult);
    }
    rsub.emitError("unimplemented: dtype other than float and integer "
                   "types are not supported.");
    return nullptr;
  }
  if (auto mulScalar = dyn_cast<AtenMulScalarOp>(op)) {
    Type dtype =
        cast<RankedTensorType>(converter->convertType(mulScalar.getType()))
            .getElementType();
    Value lhs = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value rhs = convertScalarToDtype(b, loc, operands[1], dtype);
    if (isa<mlir::FloatType>(dtype))
      return b.create<arith::MulFOp>(loc, lhs, rhs);
    if (isa<mlir::IntegerType>(dtype))
      return b.create<arith::MulIOp>(loc, lhs, rhs);
    mulScalar.emitError("unimplemented: Only integer/float dtype supported");
    return nullptr;
  }
  if (auto atenToDtype = dyn_cast<AtenToDtypeOp>(op)) {
    Value input = payloadArgs[0];
    Type dtype =
        cast<RankedTensorType>(converter->convertType(atenToDtype.getType()))
            .getElementType();
    Type resultElementType;
    int64_t dtypeInt;
    if (!matchPattern(atenToDtype.getDtype(), m_TorchConstantInt(&dtypeInt))) {
      atenToDtype.emitError("unimplemented: dtype must be a constant integer");
      return nullptr;
    }
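    // Map the constant torch dtype integer onto the corresponding builtin
    // element type before converting the scalar.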
    FailureOr<Type> maybeResultElementType =
        torch_to_linalg::getBackendTypeForScalarType(
            atenToDtype->getContext(), (torch_upstream::ScalarType)dtypeInt);
    if (failed(maybeResultElementType)) {
      atenToDtype.emitError("unable to convert `dtypeInt` to builtin type");
      return nullptr;
    }
    resultElementType = *maybeResultElementType;
    Value result = convertScalarToDtype(b, loc, input, dtype,
                                        /*srcOriginalDtype=*/std::nullopt,
                                        /*dstOriginalDtype=*/resultElementType);
    return result;
  }
  if (auto divScalar = dyn_cast<AtenDivScalarOp>(op)) {
    Type dtype =
        cast<RankedTensorType>(converter->convertType(divScalar.getType()))
            .getElementType();
    if (!isa<mlir::FloatType>(dtype)) {
      divScalar.emitError("unimplemented: non-floating point dtype");
      return nullptr;
    }
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
    return b.create<arith::DivFOp>(loc, self, other);
  }
  if (auto remScalar = dyn_cast<AtenRemainderScalarOp>(op)) {
    Type newResultType = converter->convertType(remScalar.getType())
                             .cast<RankedTensorType>()
                             .getElementType();

    Value self = convertScalarToDtype(b, loc, payloadArgs[0], newResultType);
    Value other = convertScalarToDtype(b, loc, operands[1], newResultType);
    Value result;

    if (isa<mlir::FloatType>(newResultType)) {
      result = b.create<arith::RemFOp>(loc, self, other);
    } else if (isa<mlir::IntegerType>(newResultType)) {
      result = b.create<arith::RemSIOp>(loc, self, other);
    } else {
      remScalar.emitError(
          "Unsupported type encountered for AtenRemainderScalarOp.");
    }

    return result;
  }
  if (auto remTensor = dyn_cast<AtenRemainderTensorOp>(op)) {
    Type newResultType = converter->convertType(remTensor.getType())
                             .cast<RankedTensorType>()
                             .getElementType();

    Value self = convertScalarToDtype(b, loc, payloadArgs[0], newResultType);
    Value other = convertScalarToDtype(b, loc, payloadArgs[1], newResultType);
    Value result;

    if (isa<mlir::FloatType>(newResultType)) {
      result = b.create<arith::RemFOp>(loc, self, other);
    } else if (isa<mlir::IntegerType>(newResultType)) {
      result = b.create<arith::RemSIOp>(loc, self, other);
    } else {
      remTensor.emitError(
          "Unsupported type encountered for AtenRemainderTensorOp.");
    }

    return result;
  }
  if (auto fmod = dyn_cast<AtenFmodTensorOp>(op)) {
    Type newResultType = converter->convertType(fmod.getType())
                             .cast<RankedTensorType>()
                             .getElementType();

    Value self = convertScalarToDtype(b, loc, payloadArgs[0], newResultType);
    Value other = convertScalarToDtype(b, loc, payloadArgs[1], newResultType);
    Value result;
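    // fmod(self, other) = self - trunc(self / other) * other, so the
    // remainder takes the sign of `self` (C-style fmod).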
    if (isa<mlir::FloatType>(newResultType)) {
      Value n = b.create<arith::DivFOp>(loc, self, other);
      n = b.create<math::TruncOp>(loc, n);
      Value n_y = b.create<arith::MulFOp>(loc, n, other);
      result = b.create<arith::SubFOp>(loc, self, n_y);
    } else if (isa<mlir::IntegerType>(newResultType)) {
      Value n = b.create<arith::DivSIOp>(loc, self, other);
      Value n_y = b.create<arith::MulIOp>(loc, n, other);
      result = b.create<arith::SubIOp>(loc, self, n_y);
    } else {
      fmod.emitError("Unsupported type encountered for AtenFmodTensorOp.");
    }
    return result;
  }
  if (auto reciprocal = dyn_cast<AtenReciprocalOp>(op)) {
    Type dtype =
        cast<RankedTensorType>(converter->convertType(reciprocal.getType()))
            .getElementType();
    Value arg = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Type elementType = arg.getType();
    // assert(element != 0)
    auto zero =
        b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 0.0));
    auto pred =
        b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ONE, arg, zero);
    b.create<cf::AssertOp>(
        loc, pred, b.getStringAttr("unimplemented: tensor with zero element"));

    auto one =
        b.create<arith::ConstantOp>(loc, FloatAttr::get(elementType, 1.0));
    return b.create<arith::DivFOp>(loc, one, arg);
  }
  if (auto thresholdOp = dyn_cast<AtenThresholdOp>(op)) {
    // The approach used here is as follows:
    //        result = self <= threshold ? value : self
    AtenThresholdOp::Adaptor adaptor(operands);
    Type dtype =
        cast<RankedTensorType>(converter->convertType(thresholdOp.getType()))
            .getElementType();

    Value self = payloadArgs[0];
    Value threshold =
        convertScalarToDtype(b, loc, adaptor.getThreshold(), dtype);
    Value value = convertScalarToDtype(b, loc, adaptor.getValue(), dtype);

    Value predicate;
    if (isa<mlir::FloatType>(dtype))
      predicate = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULE, self,
                                          threshold);
    else
      predicate = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sle, self,
                                          threshold);
    return b.create<arith::SelectOp>(loc, predicate, value, self);
  }
  if (auto thresholdBackward = dyn_cast<AtenThresholdBackwardOp>(op)) {
    // The approach used here is as follows:
    //        result = self <= threshold ? 0 : grad
    AtenThresholdBackwardOp::Adaptor adaptor(operands);
    Type dtype = cast<RankedTensorType>(
                     converter->convertType(thresholdBackward.getType()))
                     .getElementType();

    Value grad = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value self = convertScalarToDtype(b, loc, payloadArgs[1], dtype);
    Value threshold =
        convertScalarToDtype(b, loc, adaptor.getThreshold(), dtype);
    Value constantZero = b.create<arith::ConstantOp>(loc, b.getZeroAttr(dtype));

    Value predicate;
    if (isa<mlir::FloatType>(dtype))
      predicate = b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULE, self,
                                          threshold);
    else
      predicate = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sle, self,
                                          threshold);
    return b.create<arith::SelectOp>(loc, predicate, constantZero, grad);
  }
  if (auto fillScalar = dyn_cast<AtenFillScalarOp>(op)) {
    AtenFillScalarOp::Adaptor adaptor(operands);
    Type dtype =
        cast<RankedTensorType>(converter->convertType(fillScalar.getType()))
            .getElementType();
    return convertScalarToDtype(b, loc, adaptor.getValue(), dtype);
  }
  if (auto maskedFillTensor = dyn_cast<AtenMaskedFillTensorOp>(op)) {
    AtenMaskedFillScalarOp::Adaptor adaptor(operands);
    Type dtype = cast<RankedTensorType>(
                     converter->convertType(maskedFillTensor.getType()))
                     .getElementType();

    Value input = payloadArgs[0];
    Value mask = payloadArgs[1];
    Value fillValue = convertScalarToDtype(b, loc, payloadArgs[2], dtype);
    return b.create<arith::SelectOp>(loc, mask, fillValue, input);
  }
  if (auto fillTensor = dyn_cast<AtenFillTensorOp>(op)) {
    AtenFillTensorOp::Adaptor adaptor(operands);
    Type dtype =
        cast<RankedTensorType>(converter->convertType(fillTensor.getType()))
            .getElementType();
    return convertScalarToDtype(b, loc, payloadArgs[1], dtype);
  }

  if (auto triu = dyn_cast<AtenTriuOp>(op)) {
    Value result;
    if (failed(createTriangularMatrix<arith::CmpIPredicate::sge>(
            b, loc, payloadArgs, op, operands, result)))
      return nullptr;
    return result;
  }

  if (auto tril = dyn_cast<AtenTrilOp>(op)) {
    Value result;
    if (failed(createTriangularMatrix<arith::CmpIPredicate::sle>(
            b, loc, payloadArgs, op, operands, result)))
      return nullptr;
    return result;
  }

  if (auto bitwiseNot = dyn_cast<AtenBitwiseNotOp>(op)) {
    Type elementType = converter->convertType(bitwiseNot.getType())
                           .cast<RankedTensorType>()
                           .getElementType();
    if (isa<mlir::FloatType>(elementType)) {
      bitwiseNot.emitError("Bitwise_Not does not support floating point dtype");
      return nullptr;
    }
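    // Bitwise NOT is computed as XOR with an all-ones constant of the
    // element's bit width.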
    Value allOnesVal = b.create<arith::ConstantOp>(
        loc, b.getIntegerAttr(
                 elementType,
                 APSInt::getAllOnes(elementType.getIntOrFloatBitWidth())));
    return b.create<arith::XOrIOp>(loc, payloadArgs[0], allOnesVal);
  }

  if (isa<AtenDequantizeTensorOp, AtenDequantizeSelfOp>(op)) {
    auto value = payloadArgs[0];
    auto valueTy = value.getType();
    auto qtensor = op->getOperand(0);
    auto qtensorTy = cast<ValueTensorType>(qtensor.getType()).getDtype();

    Value zp, scale;
    if (auto makeQTensor =
            qtensor.getDefiningOp<Aten_MakePerTensorQuantizedTensorOp>()) {
      zp = makeQTensor.getZeroPoint();
      scale = makeQTensor.getScale();
    }

    if (auto quant = qtensor.getDefiningOp<AtenQuantizePerTensorOp>()) {
      zp = quant.getZeroPoint();
      scale = quant.getScale();
    }

    if (!zp || !scale) {
      return nullptr;
    }
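    // Dequantization computes (value - zero_point) * scale in the wider
    // output float type.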
    auto outFpTy = payloadArgs[1].getType();
    auto outBw = outFpTy.getIntOrFloatBitWidth();
    auto outIntTy = b.getIntegerType(outBw);

    if (valueTy != outIntTy) {
      if (torch_to_linalg::isUnsignedTorchType(qtensorTy)) {
        value = b.create<arith::ExtUIOp>(loc, outIntTy, value);
      } else {
        value = b.create<arith::ExtSIOp>(loc, outIntTy, value);
      }
    }

    zp = converter->materializeTargetConversion(
        b, loc, converter->convertType(zp.getType()), zp);
    auto zpTy = zp.getType();

    if (zpTy != outIntTy) {
      zp = b.create<arith::TruncIOp>(loc, outIntTy, zp);
    }

    value = b.create<arith::SubIOp>(loc, value, zp);
    // Treat the i32 as a signed int regardless of the original signedness;
    // this prevents overflow from the subtraction for unsigned quantizations.
    value = b.create<arith::SIToFPOp>(loc, outFpTy, value);

    scale = converter->materializeTargetConversion(
        b, loc, converter->convertType(scale.getType()), scale);
    if (scale.getType() != value.getType()) {
      scale = b.create<arith::TruncFOp>(loc, value.getType(), scale);
    }
    value = b.create<arith::MulFOp>(loc, value, scale);
    return value;
  }

  if (auto quant = dyn_cast<AtenQuantizePerTensorOp>(op)) {
    Value value = payloadArgs[0];
    Value scale = quant.getScale();
    Value zp = quant.getZeroPoint();
    auto valueTy = value.getType();
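    // Quantization computes round(value / scale) + zero_point, clamped to
    // the representable range of the destination integer type.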
    zp = converter->materializeTargetConversion(
        b, loc, converter->convertType(zp.getType()), zp);
    zp = b.create<arith::SIToFPOp>(loc, valueTy, zp);

    scale = converter->materializeTargetConversion(
        b, loc, converter->convertType(scale.getType()), scale);
    scale = b.create<arith::TruncFOp>(loc, valueTy, scale);

    value = b.create<arith::DivFOp>(loc, value, scale);
    value = b.create<math::RoundOp>(loc, value);
    value = b.create<arith::AddFOp>(loc, value, zp);

    auto destTy = payloadArgs[1].getType();
    auto bitwidth = destTy.getIntOrFloatBitWidth();
    bool isUnsigned = torch_to_linalg::isUnsignedTorchType(quant.getType());
    APInt min = isUnsigned ? APInt::getMinValue(bitwidth)
                           : APInt::getSignedMinValue(bitwidth);
    APInt max = isUnsigned ? APInt::getMaxValue(bitwidth)
                           : APInt::getSignedMaxValue(bitwidth);

    double minI = isUnsigned ? static_cast<double>(min.getZExtValue())
                             : static_cast<double>(min.getSExtValue());
    double maxI = isUnsigned ? static_cast<double>(max.getZExtValue())
                             : static_cast<double>(max.getSExtValue());
    Value minVal =
        b.create<arith::ConstantOp>(loc, b.getFloatAttr(valueTy, minI));
    Value maxVal =
        b.create<arith::ConstantOp>(loc, b.getFloatAttr(valueTy, maxI));
    Value minCmp =
        b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::ULT, value, minVal);
    Value maxCmp =
        b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT, value, maxVal);
    value = b.create<arith::SelectOp>(loc, minCmp, minVal, value);
    value = b.create<arith::SelectOp>(loc, maxCmp, maxVal, value);

    if (isUnsigned) {
      value = b.create<arith::FPToUIOp>(loc, destTy, value);
    } else {
      value = b.create<arith::FPToSIOp>(loc, destTy, value);
    }

    return value;
  }

  op->emitError("unimplemented lowering in "
                "createLinalgPayloadCalculationForElementwiseOp");
  return nullptr;
}

namespace {
// Converts an elementwise op.
// This specifically includes:
// - converting elementwise ops of any tensor arity
// - converting elementwise ops with any number of scalar captures (such as a
//   scalar alpha to torch.aten.Add)
// - broadcasting of static size-1 dimensions
//
// Currently, we adopt the behavior that "size 1" broadcasting is a runtime
// error if it happens dynamically.
//
// Looking forward a bit, eventually, it probably makes sense to have
// a "linalg.generic-like" op for modeling a fused subgraph of numpy-broadcasted
// operands. Modeling elementwise ops that way is potentially useful to allow a
// more centralized reasoning about multiversioning. However a cost model will
// be needed for "pre-fusing" elementwise ops that way, as it can potentially be
// a pessimization. A mild extension of this pattern should work for such a
// general op.
class ConvertElementwiseOp : public ConversionPattern {
public:
  ConvertElementwiseOp(TypeConverter &typeConverter, MLIRContext *context)
      : ConversionPattern(typeConverter, MatchAnyOpTypeTag(), /*benefit=*/1,
                          context) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    if (!isa<AtenTanOp, AtenTanhOp, AtenSinhOp, AtenCoshOp, AtenReluOp,
             AtenPreluOp, AtenGeluOp, AtenGeluBackwardOp, AtenAddTensorOp,
             AtenMulTensorOp, AtenDivTensorOp, AtenDivTensorModeOp,
             AtenDivScalarModeOp, AtenSubTensorOp, AtenAtan2Op,
             AtenLerpTensorOp, AtenSigmoidOp, AtenExpOp, AtenExpm1Op,
             AtenMinimumOp, AtenMaximumOp, AtenToDtypeOp, AtenClampOp,
             AtenClampTensorOp, AtenRsubScalarOp, AtenMulScalarOp, AtenLogOp,
             AtenErfOp, AtenSqrtOp, AtenFloorOp, AtenPowScalarOp,
             AtenPowTensorScalarOp, AtenPowTensorTensorOp, AtenLog2Op,
             AtenLog10Op, AtenLog1pOp, AtenRsqrtOp, AtenDivScalarOp,
             AtenRemainderScalarOp, AtenRemainderTensorOp, AtenFmodTensorOp,
             AtenAbsOp, AtenReciprocalOp, AtenBitwiseAndTensorOp,
             AtenBitwiseAndScalarOp, AtenBitwiseOrTensorOp,
             AtenBitwiseXorTensorOp, AtenBitwiseLeftShiftTensorOp,
             AtenBitwiseRightShiftTensorOp, AtenGtScalarOp, AtenGeScalarOp,
             AtenEqScalarOp, AtenLtScalarOp, AtenLeScalarOp, AtenWhereSelfOp,
             AtenCeilOp, AtenGtTensorOp, AtenGeTensorOp, AtenEqTensorOp,
             AtenNeTensorOp, AtenLtTensorOp, AtenLeTensorOp, AtenSubScalarOp,
             AtenAddScalarOp, AtenThresholdOp, AtenThresholdBackwardOp,
             AtenHardtanhBackwardOp, AtenCloneOp, AtenSinOp, AtenCosOp,
             AtenNeScalarOp, AtenNegOp, AtenMaskedFillTensorOp, AtenLogicalOrOp,
             AtenLogicalAndOp, AtenLogicalXorOp, AtenLogicalNotOp, AtenIsinfOp,
             AtenTriuOp, AtenTrilOp, AtenBitwiseNotOp, AtenRoundOp,
             AtenFillScalarOp, AtenFillTensorOp, AtenAtanOp, AtenAcosOp,
             AtenAtanhOp, AtenAcoshOp, AtenAsinOp, AtenAsinhOp, AtenRealOp,
             AtenImagOp, AtenDequantizeSelfOp, AtenDequantizeTensorOp,
             AtenQuantizePerTensorOp>(op))
      return rewriter.notifyMatchFailure(op, "not a supported elementwise op");

    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    Location loc = op->getLoc();
    auto tensorOperands = llvm::to_vector<6>(llvm::make_filter_range(
        operands, [](Value v) { return v.getType().isa<RankedTensorType>(); }));
    auto resultType = getTypeConverter()
                          ->convertType(op->getResult(0).getType())
                          .cast<RankedTensorType>();
    bool hadErrorCreatingPayload = false;
    Value generic = torch_to_linalg::createElementwiseLinalgGeneric(
        rewriter, loc, tensorOperands, resultType.getElementType(),
        [&](OpBuilder &b, Location loc, ValueRange payloadArgs) {
          Value result = createLinalgPayloadCalculationForElementwiseOp(
              b, loc, getTypeConverter(), payloadArgs, op, operands);
          if (!result) {
            hadErrorCreatingPayload = true;
            return;
          }
          b.create<linalg::YieldOp>(loc, result);
        });
    if (hadErrorCreatingPayload)
      return failure();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType, generic);
    return success();
  }
};
} // namespace

// Given `input`, `target`, `nll_loss_forward` is given by:
//   for i in range(0, len(target)):
//     indi = target[i];
//     nll_loss_forward[i] = -(input[i][indi]);
// TODO: `weight` operand is still to be taken care of.
namespace {

class ConvertAtenNllLossForwardOp
    : public OpConversionPattern<AtenNllLossForwardOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenNllLossForwardOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();
    Location loc = op->getLoc();
    Value input = adaptor.getSelf();
    Value target = adaptor.getTarget();
    Value weight = adaptor.getWeight();

    int64_t reduction;
    if (!matchPattern(op.getReduction(), m_TorchConstantInt(&reduction)))
      return rewriter.notifyMatchFailure(op, "dim must be constant");

    // TODO: Incorporate the weight argument.
    if (!weight.getType().isa<mlir::torch::Torch::NoneType>())
      return rewriter.notifyMatchFailure(
          op, "Unimplemented, the weight operand is not incorporated.");

    Value ignoreIndex = adaptor.getIgnoreIndex();
    Value ignoreIndexVal = castIntToIndex(rewriter, loc, ignoreIndex);

    unsigned inputRank = cast<RankedTensorType>(input.getType()).getRank();
    unsigned targetRank = cast<RankedTensorType>(target.getType()).getRank();

    // TODO: Add support for k-dim loss.
    if (inputRank > 2) {
      return rewriter.notifyMatchFailure(
          op, "expected input and target to be rank <= 2");
    }
    RankedTensorType resultType = getTypeConverter()
                                      ->convertType(op->getResult(0).getType())
                                      .cast<RankedTensorType>();
    Type elementType = resultType.getElementType();

    Value zeroVal = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getZeroAttr(elementType));

    Value finalRes = torch_to_linalg::createElementwiseLinalgGeneric(
        rewriter, loc, {target}, elementType,
        [&](OpBuilder &b, Location loc, ValueRange args) {
          Value targetVal = args[0];
          Value indTarget = rewriter.create<arith::IndexCastOp>(
              loc, rewriter.getIndexType(), targetVal);

          // The final result is given by:
          // final_res = (indTarget == ignoreIndexVal) ? 0 : input[indI][indTarget]
          Value cmpEq = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::eq, indTarget, ignoreIndexVal);

          SmallVector<Value> extractionIndices{indTarget};
          if (inputRank == 2) {
            Value indI = rewriter.create<linalg::IndexOp>(loc, 0);
            extractionIndices.insert(extractionIndices.begin(), indI);
          }

          Value result =
              rewriter.create<tensor::ExtractOp>(loc, input, extractionIndices);

          Value negate =
              rewriter.create<arith::NegFOp>(loc, elementType, result);
          Value selectFinal =
              rewriter.create<arith::SelectOp>(loc, cmpEq, zeroVal, negate);
          b.create<linalg::YieldOp>(loc, selectFinal);
        });

    llvm::iota_range<int64_t> dimsToReduce(0, targetRank,
                                           /*inclusive=*/false);
    DenseSet<int64_t> dimSet(dimsToReduce.begin(), dimsToReduce.end());
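    // For `sum` and `mean` reductions, fold the per-element losses over all
    // target dimensions; for `mean`, each element is divided by the total
    // element count before accumulating.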
    if (reduction == torch_upstream::Reduction::Sum ||
        reduction == torch_upstream::Reduction::Mean) {
      Value numOfElems = getTensorSize(rewriter, loc, finalRes);
      numOfElems = convertScalarToDtype(rewriter, loc, numOfElems, elementType);

      auto opInfo = torch_to_linalg::ReductionOpInfo{false, finalRes, dimSet};
      finalRes = torch_to_linalg::createReductionLinalgGeneric(
          rewriter, loc, opInfo,
          /*initElem=*/zeroVal,
          [&](OpBuilder &b, Location loc, ValueRange args) {
            Value newVal = args[0];
            Value accumulator = args[1];
            if (reduction == torch_upstream::Reduction::Mean)
              newVal = b.create<arith::DivFOp>(loc, newVal, numOfElems);
            Value result = b.create<arith::AddFOp>(loc, newVal, accumulator);
            b.create<linalg::YieldOp>(loc, result);
          });
    }

    // The implementation for the `total_weight` has been adapted from here:
    // https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/LossNLL.cpp#L154-L294
    // As per the ref link, when `weight` is `None` the `total_weight` value
    // is equal to `total_weight = batch_size - num_ignored_index`,
    // where `batch_size` is equal to `target.shape[0]` when rank(target) > 0,
    // otherwise 1. The value `num_ignored_index` is the number of elements of
    // the `target` tensors that have been ignored.

    if (reduction == torch_upstream::Reduction::None && inputRank == 2) {
      Value totalWeight = createZeroInitTensor(rewriter, loc, {}, elementType);
      rewriter.replaceOp(op, {finalRes, totalWeight});
      return success();
    }

    Value numIgnoredIndex;
    if (targetRank == 0) {
      Value targetVal = rewriter.create<tensor::ExtractOp>(loc, target);
      numIgnoredIndex = rewriter.create<arith::CmpIOp>(
          loc, arith::CmpIPredicate::eq, targetVal, ignoreIndex);
      numIgnoredIndex = convertScalarToDtype(rewriter, loc, numIgnoredIndex,
                                             ignoreIndex.getType());
    } else {
      Value zeroCstInt = rewriter.create<arith::ConstantOp>(
          loc, rewriter.getZeroAttr(ignoreIndex.getType()));

      auto opInfo =
          torch_to_linalg::ReductionOpInfo{/*keepDim=*/false, target, dimSet};
      numIgnoredIndex = torch_to_linalg::createReductionLinalgGeneric(
          rewriter, loc, opInfo,
          /*initElem=*/zeroCstInt,
          [&](OpBuilder &b, Location loc, ValueRange args) {
            Value targetVal = args[0];
            Value accumulator = args[1];
            Value result = b.create<arith::CmpIOp>(
                loc, arith::CmpIPredicate::eq, targetVal, ignoreIndex);
            result = b.create<arith::AddIOp>(
                loc,
                convertScalarToDtype(rewriter, loc, result,
                                     ignoreIndex.getType()),
                accumulator);
            b.create<linalg::YieldOp>(loc, result);
          });

      numIgnoredIndex =
          rewriter.create<tensor::ExtractOp>(loc, numIgnoredIndex);
    }

    Value numtargetElems = getTensorSize(rewriter, loc, target);
    Value totalWeightVal =
        rewriter.create<arith::SubIOp>(loc, numtargetElems, numIgnoredIndex);
    Value totalWeight = createInitTensor(
        rewriter, loc, {}, elementType,
        convertScalarToDtype(rewriter, loc, totalWeightVal, elementType));

    rewriter.replaceOp(op, {finalRes, totalWeight});
    return success();
  }
};
} // namespace

/// Inverted STD: rSTD = 1 / sqrt(var + eps).
static Value calculateRSTD(OpBuilder &b, Location loc, Type elemTy, Value eps,
                           Value var) {
  // The eps is always f64.
  Value truncatedEps = b.create<arith::TruncFOp>(loc, elemTy, eps);
  Value varPlusEps = b.create<arith::AddFOp>(loc, var, truncatedEps);
  Value rSTD = b.create<math::RsqrtOp>(loc, varPlusEps);
  return rSTD;
}

// Normalization formula:
//   (input - mean) * rSTD * weight + bias
static Value createLinalgPayloadCalculationForNormOpsWithRSTD(
    OpBuilder &b, Location loc, Type elemTy, Value input, Value mean,
    Value rSTD, Value eps, Value weight, Value bias) {
  Value inputSubMean = b.create<arith::SubFOp>(loc, input, mean);
  Value temp = b.create<arith::MulFOp>(loc, inputSubMean, rSTD);
  Value timesWeight = b.create<arith::MulFOp>(loc, temp, weight);
  Value plusBias = b.create<arith::AddFOp>(loc, timesWeight, bias);
  return plusBias;
}

static Value createLinalgPayloadCalculationForNormOpsWithVar(
    OpBuilder &b, Location loc, Type elemTy, Value input, Value mean, Value var,
    Value eps, Value weight, Value bias) {
  Value rSTD = calculateRSTD(b, loc, elemTy, eps, var);
  Value result = createLinalgPayloadCalculationForNormOpsWithRSTD(
      b, loc, elemTy, input, mean, rSTD, eps, weight, bias);
  return result;
}

namespace {
class ConvertAtenBatchNormOp : public OpConversionPattern<AtenBatchNormOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenBatchNormOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    MLIRContext *context = op->getContext();
    Location loc = op->getLoc();
    Value input = adaptor.getInput();
    Value weight = adaptor.getWeight();
    Value bias = adaptor.getBias();
    Value runningMean = adaptor.getRunningMean();
    Value runningVar = adaptor.getRunningVar();
    Value training = adaptor.getTraining();
    Value eps = adaptor.getEps();

    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    // TODO: Handle the None cases for the optional parameters:
    // weight, bias.
    if (failed(checkNotNone(rewriter, op, weight)) ||
        failed(checkNotNone(rewriter, op, bias)) ||
        failed(checkNotNone(rewriter, op, runningMean)) ||
        failed(checkNotNone(rewriter, op, runningVar)))
      return failure();

    auto inputType = cast<RankedTensorType>(input.getType());
    auto weightType = cast<RankedTensorType>(weight.getType());
    auto biasType = cast<RankedTensorType>(bias.getType());
    auto runningMeanType = cast<RankedTensorType>(runningMean.getType());
    auto runningVarType = cast<RankedTensorType>(runningVar.getType());

    auto inputRank = inputType.getRank();
    if (inputRank < 2)
      return rewriter.notifyMatchFailure(
          op, "input should have rank larger than 1");

    if (weightType.getRank() != 1 || biasType.getRank() != 1 ||
        runningMeanType.getRank() != 1 || runningVarType.getRank() != 1) {
      return rewriter.notifyMatchFailure(
          op, "expect weight, bias, running_mean and running_var to be rank 1");
    }

    // TODO: Add support for training.
    auto constFalse = rewriter.create<arith::ConstantOp>(
        loc, IntegerAttr::get(IntegerType::get(context, 1), 0));
    auto trainingFalse = rewriter.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::eq, training, constFalse);
    rewriter.create<cf::AssertOp>(
        loc, trainingFalse,
        rewriter.getStringAttr("training is not supported for now"));

    // num_features – C from an expected input of size (N,C,D,H,W ...)
    Value numFeatures = rewriter.create<tensor::DimOp>(loc, input, 1);
    auto contractingDim0EqualsNumFeatures = [&](Value v) {
      auto dim0 = rewriter.create<tensor::DimOp>(loc, v, 0);
      auto dim0Equal = rewriter.create<arith::CmpIOp>(
          loc, arith::CmpIPredicate::eq, numFeatures, dim0);
      rewriter.create<cf::AssertOp>(
          loc, dim0Equal,
          rewriter.getStringAttr(
              "expect the size of dim 0 equal to the number of features"));
    };
    if (!isAssumingStrictSymbolicShapes(rewriter)) {
      contractingDim0EqualsNumFeatures(weight);
      contractingDim0EqualsNumFeatures(bias);
      contractingDim0EqualsNumFeatures(runningMean);
      contractingDim0EqualsNumFeatures(runningVar);
    }
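    // Weight, bias, running_mean and running_var are rank-1 tensors indexed
    // by the channel dimension (dim 1) of the input, so they all share the
    // d1 indexing map built below.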
|
2022-03-11 01:54:13 +08:00
|
|
|
|
|
|
|
|
|
auto indexingMap = AffineMap::get(
|
|
|
|
|
/*dimCount=*/inputRank,
|
|
|
|
|
/*symbolCount=*/0, rewriter.getAffineDimExpr(1), context);
|
|
|
|
|
SmallVector<AffineMap> indexingMaps = {
|
|
|
|
|
rewriter.getMultiDimIdentityMap(inputRank), // input
|
|
|
|
|
indexingMap, // weight
|
|
|
|
|
indexingMap, // bias
|
|
|
|
|
indexingMap, // runningMean
|
|
|
|
|
indexingMap, // runningVar
|
|
|
|
|
rewriter.getMultiDimIdentityMap(inputRank), // output
|
|
|
|
|
};
|
2022-11-17 06:40:36 +08:00
|
|
|
|
SmallVector<utils::IteratorType> iteratorTypes(
|
|
|
|
|
inputRank, utils::IteratorType::parallel);
|
2022-03-11 01:54:13 +08:00
|
|
|
|
Value batchNorm =
|
|
|
|
|
rewriter
|
|
|
|
|
.create<linalg::GenericOp>(
|
|
|
|
|
loc, input.getType(),
|
|
|
|
|
ValueRange{input, weight, bias, runningMean, runningVar}, input,
|
|
|
|
|
/*indexingMaps=*/indexingMaps,
|
|
|
|
|
/*iteratorTypes=*/iteratorTypes,
|
|
|
|
|
[&](OpBuilder &b, Location loc, ValueRange args) {
|
|
|
|
|
Value input = args[0], weight = args[1], bias = args[2],
|
|
|
|
|
mean = args[3], var = args[4];
|
2022-03-16 20:51:57 +08:00
|
|
|
|
Value result =
|
|
|
|
|
createLinalgPayloadCalculationForNormOpsWithVar(
|
|
|
|
|
b, loc, var.getType(), input, mean, var, eps, weight,
|
|
|
|
|
bias);
|
2022-03-11 01:54:13 +08:00
|
|
|
|
b.create<linalg::YieldOp>(loc, result);
|
|
|
|
|
})
|
|
|
|
|
.getResult(0);
|
|
|
|
|
Type newResultType = getTypeConverter()->convertType(op.getType());
|
|
|
|
|
rewriter.replaceOpWithNewOp<tensor::CastOp>(op, newResultType, batchNorm);
|
|
|
|
|
return success();
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
class ConvertAtenNllLossBackwardOp
|
|
|
|
|
: public OpConversionPattern<AtenNllLossBackwardOp> {
|
|
|
|
|
public:
|
|
|
|
|
using OpConversionPattern::OpConversionPattern;
|
|
|
|
|
LogicalResult
|
|
|
|
|
matchAndRewrite(AtenNllLossBackwardOp op, OpAdaptor adaptor,
|
|
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
|
|
|
|
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
|
|
|
|
|
return failure();
|
2022-04-05 01:57:49 +08:00
|
|
|
|
|
2022-03-11 01:54:13 +08:00
|
|
|
|
Location loc = op->getLoc();
|
2022-12-08 04:20:41 +08:00
|
|
|
|
Value gradOutput = adaptor.getGradOutput();
|
|
|
|
|
Value input = adaptor.getSelf();
|
|
|
|
|
Value target = adaptor.getTarget();
|
|
|
|
|
Value weight = adaptor.getWeight();
|
|
|
|
|
bool weightIsNone = op.getWeight().getType().isa<Torch::NoneType>();
|
|
|
|
|
Value ignoreIndex = castIntToIndex(rewriter, loc, adaptor.getIgnoreIndex());
|
|
|
|
|
Value totalWeight = adaptor.getTotalWeight();
|
2022-04-05 01:57:49 +08:00
|
|
|
|
|
2024-04-28 05:00:56 +08:00
|
|
|
|
auto inputType = cast<RankedTensorType>(input.getType());
|
2022-04-05 01:57:49 +08:00
|
|
|
|
int inputRank = inputType.getRank();
|
2024-04-28 05:00:56 +08:00
|
|
|
|
auto gradOutputType = cast<RankedTensorType>(gradOutput.getType());
|
2022-04-05 01:57:49 +08:00
|
|
|
|
Type resultElementType = gradOutputType.getElementType();
|
2022-03-11 01:54:13 +08:00
|
|
|
|
|
|
|
|
|
int64_t reduction;
|
2022-12-08 04:20:41 +08:00
|
|
|
|
if (!matchPattern(op.getReduction(), m_TorchConstantInt(&reduction)))
|
2022-03-11 01:54:13 +08:00
|
|
|
|
return rewriter.notifyMatchFailure(op, "dim must be constant");
|
|
|
|
|
|
2022-04-05 01:57:49 +08:00
|
|
|
|
if (!hasElementType<mlir::FloatType>(gradOutput) ||
|
|
|
|
|
!hasElementType<mlir::FloatType>(gradOutput) ||
|
|
|
|
|
(!weightIsNone && !hasElementType<mlir::FloatType>(weight))) {
|
2022-03-11 01:54:13 +08:00
|
|
|
|
return rewriter.notifyMatchFailure(
|
2022-04-05 01:57:49 +08:00
|
|
|
|
op, "`gradOutput`, 'weight', and `totalWeight` must be tensors of "
|
|
|
|
|
"type float");
|
|
|
|
|
}
|
2022-03-11 01:54:13 +08:00
|
|
|
|
|
2022-04-05 01:57:49 +08:00
|
|
|
|
if (!hasElementType<mlir::IntegerType>(target)) {
|
2022-03-11 01:54:13 +08:00
|
|
|
|
return rewriter.notifyMatchFailure(
|
2022-04-05 01:57:49 +08:00
|
|
|
|
op, "`target` must be a tensor of integer type");
|
2022-03-11 01:54:13 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
auto outputSize = getTensorSizes(rewriter, loc, input);
|
2022-04-05 01:57:49 +08:00
|
|
|
|
Value gradInputTensor =
|
|
|
|
|
createZeroInitTensor(rewriter, loc, outputSize, resultElementType);
|
|
|
|
|
|
|
|
|
|
    auto getAffineMapForSingleElementTensor = [&](Value tensor) {
      auto tensorType = cast<RankedTensorType>(tensor.getType());
      SmallVector<AffineExpr> affineExprs(tensorType.getRank(),
                                          rewriter.getAffineConstantExpr(0));
      return AffineMap::get(inputRank, /*symbolCount=*/0, affineExprs,
                            op->getContext());
    };

    AffineMap gradOutMap = AffineMap::get(inputRank, /*symbolCount=*/0,
                                          rewriter.getAffineDimExpr(0));
    if (reduction != torch_upstream::Reduction::None || inputRank == 1)
      gradOutMap = getAffineMapForSingleElementTensor(gradOutput);
    AffineMap targetMap = AffineMap::get(inputRank, /*symbolCount=*/0,
                                         rewriter.getAffineDimExpr(0));
    if (inputRank == 1)
      targetMap = getAffineMapForSingleElementTensor(target);
    AffineMap totalWeightMap = getAffineMapForSingleElementTensor(totalWeight);
    AffineMap resultMap = rewriter.getMultiDimIdentityMap(inputRank);

    SmallVector<AffineMap> indexingMaps{gradOutMap, targetMap, totalWeightMap,
                                        resultMap};
    SmallVector<utils::IteratorType> iteratorTypes(
        inputRank, utils::IteratorType::parallel);

    // The code generation is equivalent to the following pseudo-code:
    //
    // for batch_index in range(input.size(0)):
    //   for class_index in range(input.size(1)):
    //     target_elem = target[batch_index]
    //
    //     if reduction == None:
    //       grad_out_elem = grad_output[batch_index]
    //     else:
    //       grad_out_elem = grad_output[0]
    //
    //     if reduction == Mean:
    //       total_weight_elem = total_weight[0]
    //       grad_out_elem /= total_weight_elem
    //
    //     weight_elem = weight[target_elem] if weight != None else 1
    //
    //     if target_elem != class_index or target_elem == ignore_index:
    //       grad_input_elem = 0
    //     else:
    //       grad_input_elem = -weight_elem * grad_out_elem
    //     grad_input[batch_index, class_index] = grad_input_elem
    //
    // NOTE: If there is no batch dimension, `batch_index` is effectively
    // zero.
    Value gradInput =
        rewriter
            .create<linalg::GenericOp>(
                loc, gradInputTensor.getType(),
                ValueRange{gradOutput, target, totalWeight}, gradInputTensor,
                indexingMaps, iteratorTypes,
                [&](OpBuilder &b, Location loc, ValueRange args) {
                  Value gradOutElem = args[0];
                  Value targetElem = castIntToIndex(b, loc, args[1]);
                  Value totalWeightElem = args[2];
                  Value classIndex =
                      b.create<linalg::IndexOp>(loc, inputRank - 1);

                  if (reduction == torch_upstream::Reduction::Mean) {
                    gradOutElem = b.create<arith::DivFOp>(loc, gradOutElem,
                                                          totalWeightElem);
                  }

                  Value negGradOutElem =
                      b.create<arith::NegFOp>(loc, gradOutElem);
                  Value weightElem = getConstant(b, loc, 1, resultElementType);
                  if (!weightIsNone) {
                    weightElem =
                        b.create<tensor::ExtractOp>(loc, weight, targetElem);
                  }
                  Value weightedNegGradOutElem =
                      b.create<arith::MulFOp>(loc, weightElem, negGradOutElem);

                  Value targetNeqClassIndex = b.create<arith::CmpIOp>(
                      loc, arith::CmpIPredicate::ne, targetElem, classIndex);
                  Value targetEqIgnoreIndex = b.create<arith::CmpIOp>(
                      loc, arith::CmpIPredicate::eq, targetElem, ignoreIndex);
                  Value gradInputIsZero = b.create<arith::OrIOp>(
                      loc, targetNeqClassIndex, targetEqIgnoreIndex);

                  Value zero = getConstant(b, loc, 0, resultElementType);
                  Value gradInElem = b.create<arith::SelectOp>(
                      loc, gradInputIsZero, zero, weightedNegGradOutElem);
                  b.create<linalg::YieldOp>(loc, gradInElem);
                })
            ->getResult(0);

    RankedTensorType resultType = getTypeConverter()
                                      ->convertType(op->getResult(0).getType())
                                      .cast<RankedTensorType>();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType, gradInput);
    return success();
  }
};
} // namespace

namespace {
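// `aten.detach` is a value-level identity at this stage (gradient tracking
// has no representation in the linalg lowering), so it reduces to a
// metadata-only `tensor.cast` to the converted result type.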
class ConvertAtenDetachOp : public OpConversionPattern<AtenDetachOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenDetachOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    Type resultType = getTypeConverter()->convertType(op.getType());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType,
                                                adaptor.getSelf());
    return success();
  }
};
} // namespace

namespace {
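// Lowers `prims.split_dim`, which splits dimension `dim` of the input into
// two adjacent result dimensions, to `tensor.expand_shape`. The reassociation
// list built below has one singleton group per untouched dimension plus the
// pair {dim, dim + 1} that maps the two new dimensions back to the original
// input dimension.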
class ConvertPrimsSplitDimOp : public OpConversionPattern<PrimsSplitDimOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(PrimsSplitDimOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    auto aRankedTensorType = cast<RankedTensorType>(adaptor.getA().getType());
    const TypeConverter *typeConverter = getTypeConverter();
    auto resultRankedTensorType =
        cast<RankedTensorType>(typeConverter->convertType(op.getType()));

    // The dimension being split must be statically known.
    int64_t dimInt;
    if (!matchPattern(op.getDim(), m_TorchConstantInt(&dimInt)))
      return failure();

    SmallVector<ReassociationIndices> associations;
    associations.reserve(aRankedTensorType.getRank());

    for (unsigned i = 0; i < dimInt; ++i) {
      associations.push_back(ReassociationIndices{i});
    }
    associations.push_back(ReassociationIndices{dimInt, dimInt + 1});
    for (int i = dimInt + 2; i < resultRankedTensorType.getRank(); ++i) {
      associations.push_back(ReassociationIndices{i});
    }

    auto expanded = rewriter.createOrFold<tensor::ExpandShapeOp>(
        op.getLoc(), resultRankedTensorType, adaptor.getA(), associations);

    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultRankedTensorType,
                                                expanded);
    return success();
  }
};
} // namespace

namespace {
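// Lowers `prims.collapse`, which flattens the dimensions in the inclusive
// range [start, end] into a single dimension, to `tensor.collapse_shape`.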
class ConvertPrimsCollapseOp : public OpConversionPattern<PrimsCollapseOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(PrimsCollapseOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    auto aRankedTensorType = cast<RankedTensorType>(adaptor.getA().getType());
    const TypeConverter *typeConverter = getTypeConverter();
    auto resultRankedTensorType =
        cast<RankedTensorType>(typeConverter->convertType(op.getType()));

    // The collapse range must be statically known.
    int64_t startInt;
    if (!matchPattern(op.getStart(), m_TorchConstantInt(&startInt)))
      return failure();

    int64_t endInt;
    if (!matchPattern(op.getEnd(), m_TorchConstantInt(&endInt)))
      return failure();

    // Upstream MLIR is overly strict -- it fails verification if the
    // collapse_shape is the identity op (i.e. when no dimensions are
    // collapsed). We manually fold this case here.
    if (startInt == endInt) {
      rewriter.replaceOp(op, adaptor.getA());
      return success();
    }

    SmallVector<ReassociationIndices> associations;
    associations.reserve(resultRankedTensorType.getRank());

    // An example: for an input shape of [3,4,5,6] with start = 1 and
    // end = 2, the collapsed shape is [3,4*5,6], with reassociation
    // indices of [0], [1,2], and [3].

    // Append the singleton dimensions before the collapsed dimensions.
    for (unsigned i = 0; i < startInt; ++i) {
      associations.push_back(ReassociationIndices{i});
    }

    // Append the collapsed dimensions.
    ReassociationIndices collapseDims(endInt + 1 - startInt);
    std::iota(collapseDims.begin(), collapseDims.end(), startInt);
    associations.push_back(collapseDims);

    // Append the singleton dimensions after the collapsed dimensions.
    for (int i = endInt + 1; i < aRankedTensorType.getRank(); ++i) {
      associations.push_back(ReassociationIndices{i});
    }

    rewriter.replaceOpWithNewOp<tensor::CollapseShapeOp>(
        op, resultRankedTensorType, adaptor.getA(), associations);

    return success();
  }
};
} // namespace

namespace {
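// `torch.tensor_static_info_cast` only adds or erases static shape/dtype
// information; the underlying data is unchanged, so a `tensor.cast` to the
// converted result type suffices.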
class ConvertTensorStaticInfoCastOp
    : public OpConversionPattern<TensorStaticInfoCastOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(TensorStaticInfoCastOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    RankedTensorType resultType = getTypeConverter()
                                      ->convertType(op->getResult(0).getType())
                                      .cast<RankedTensorType>();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType,
                                                adaptor.getOperand());
    return success();
  }
};
} // namespace

namespace {
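// Lowers `aten.logit` elementwise as logit(z) = log(z / (1 - z)). When a
// non-None `eps` is provided, the input is first clamped to [eps, 1 - eps]
// so the division and logarithm stay finite at the boundaries.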
class ConvertLogitOp : public OpConversionPattern<AtenLogitOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenLogitOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Location loc = op->getLoc();
    Value input = adaptor.getSelf();
    Value eps = adaptor.getEps();

    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    bool handleEps = false;
    if (succeeded(checkNotNone(rewriter, op, eps)))
      handleEps = true;

    if (handleEps && !eps.getType().isa<mlir::FloatType>()) {
      op.emitError("Logit does not support non-floating point type");
      return failure();
    }

    auto inputType = cast<RankedTensorType>(input.getType());
    auto inputElementType = inputType.getElementType();

    if (!isa<mlir::FloatType>(inputElementType)) {
      op.emitError("Logit does not support non-floating point type");
      return failure();
    }

    auto inputRank = inputType.getRank();

    SmallVector<AffineMap> indexingMaps = {
        rewriter.getMultiDimIdentityMap(inputRank), // input
        rewriter.getMultiDimIdentityMap(inputRank), // output
    };
    SmallVector<utils::IteratorType> iteratorTypes(
        inputRank, utils::IteratorType::parallel);
    Value logit =
        rewriter
            .create<linalg::GenericOp>(
                loc, input.getType(),
                /*ins=*/input,
                /*outs=*/input,
                /*indexingMaps=*/indexingMaps,
                /*iteratorTypes=*/iteratorTypes,
                [&](OpBuilder &b, Location loc, ValueRange args) {
                  Value input = args[0];

                  TypedAttr oneAttr = b.getFloatAttr(inputElementType, 1.0);
                  Value oneValue = b.create<arith::ConstantOp>(loc, oneAttr);

                  Value zI;
                  if (!handleEps) {
                    zI = input;
                  } else {
                    Value truncEps =
                        b.create<arith::TruncFOp>(loc, inputElementType, eps);
                    Value oneMinusEps =
                        b.create<arith::SubFOp>(loc, oneValue, truncEps);

                    Value min =
                        b.create<arith::MinimumFOp>(loc, input, oneMinusEps);
                    Value clampedInput =
                        b.create<arith::MaximumFOp>(loc, min, truncEps);

                    zI = clampedInput;
                  }

                  Value probability =
                      b.create<arith::SubFOp>(loc, oneValue, zI);
                  Value odds = b.create<arith::DivFOp>(loc, zI, probability);
                  Value result = b.create<math::LogOp>(loc, odds);

                  b.create<linalg::YieldOp>(loc, result);
                })
            .getResult(0);
    Type newResultType = getTypeConverter()->convertType(op.getType());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, newResultType, logit);
    return success();
  }
};
} // namespace

namespace {
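// `aten.int_repr` exposes the underlying integer storage of a quantized
// tensor; at the value-tensor level this is a pure type change, so it lowers
// to a `tensor.cast`.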
class ConvertAtenIntReprOp : public OpConversionPattern<AtenIntReprOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenIntReprOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    RankedTensorType resultType = getTypeConverter()
                                      ->convertType(op->getResult(0).getType())
                                      .cast<RankedTensorType>();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType,
                                                adaptor.getSelf());
    return success();
  }
};
} // namespace

namespace {
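// Per-channel dequantization computes, for each element,
//   result = (int_value - zero_point) * scale
// where `scale` and `zero_point` are 1-D tensors broadcast along the
// quantization `axis` via a rank-1 indexing map.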
class ConvertDequantizePerChannel
    : public OpConversionPattern<AtenDequantizeSelfOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenDequantizeSelfOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op.getLoc();
    auto qoperand = op.getOperand();
    auto make = qoperand.getDefiningOp<Aten_MakePerChannelQuantizedTensorOp>();
    if (!make) {
      return rewriter.notifyMatchFailure(op, "did not find per channel qint");
    }

    auto converter = getTypeConverter();
    auto operand = make.getOperand(0);
    auto scale = make.getScale();
    auto zeropoint = make.getZeroPoint();
    auto axis = make.getAxis();

    IntegerAttr axisAttr;
    if (!matchPattern(axis, m_Constant(&axisAttr))) {
      return failure();
    }

    auto operandDTy = cast<ValueTensorType>(operand.getType()).getDtype();
    auto zeropointDTy = cast<ValueTensorType>(zeropoint.getType()).getDtype();
    operand = converter->materializeTargetConversion(
        rewriter, loc, converter->convertType(operand.getType()), operand);
    scale = converter->materializeTargetConversion(
        rewriter, loc, converter->convertType(scale.getType()), scale);
    zeropoint = converter->materializeTargetConversion(
        rewriter, loc, converter->convertType(zeropoint.getType()), zeropoint);

    auto resultType = converter->convertType(op->getResult(0).getType())
                          .cast<RankedTensorType>();

    llvm::SmallVector<Value> dynSizes;
    for (auto [index, dim] : llvm::enumerate(resultType.getShape())) {
      if (ShapedType::isDynamic(dim)) {
        dynSizes.push_back(rewriter.create<tensor::DimOp>(loc, operand, index));
      }
    }

    llvm::SmallVector<utils::IteratorType> iterators(
        resultType.getRank(), utils::IteratorType::parallel);
    llvm::SmallVector<AffineMap> maps(
        4, {rewriter.getMultiDimIdentityMap(resultType.getRank())});
    auto broadcastMap = AffineMap::get(
        resultType.getRank(), /*symbolCount=*/0,
        {rewriter.getAffineDimExpr(axisAttr.getInt())}, rewriter.getContext());
    maps[1] = broadcastMap;
    maps[2] = broadcastMap;

    auto empty =
        rewriter.create<tensor::EmptyOp>(op.getLoc(), resultType, dynSizes);
    auto linalgOp = rewriter.create<linalg::GenericOp>(
        loc, resultType, ValueRange{operand, scale, zeropoint},
        ValueRange{empty}, maps, iterators,
        [&](OpBuilder &b, Location loc, ValueRange args) {
          Value operand = args[0];
          Value scale = args[1];
          Value zeropoint = args[2];
          if (operandDTy.isUnsignedInteger(8)) {
            operand = b.create<arith::ExtUIOp>(loc, b.getI32Type(), operand);
          } else if (operandDTy.isSignedInteger(8)) {
            operand = b.create<arith::ExtSIOp>(loc, b.getI32Type(), operand);
          }

          if (zeropointDTy.isUnsignedInteger(8)) {
            zeropoint =
                b.create<arith::ExtUIOp>(loc, b.getI32Type(), zeropoint);
          } else if (zeropointDTy.isSignedInteger(8)) {
            zeropoint =
                b.create<arith::ExtSIOp>(loc, b.getI32Type(), zeropoint);
          }

          Value sub = rewriter.create<arith::SubIOp>(loc, operand, zeropoint);
          Value fp =
              rewriter.create<arith::SIToFPOp>(loc, args[3].getType(), sub);
          Value mul = rewriter.create<arith::MulFOp>(loc, fp, scale);
          b.create<linalg::YieldOp>(loc, mul);
        });
    rewriter.replaceOp(op, linalgOp.getResults());
    return success();
  }
};
} // namespace

namespace {
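// Shared pattern for ops whose lowering is just a re-interpretation of the
// operand's type (e.g. the `_make_per_*_quantized_tensor` ops): convert the
// result type and emit a single `tensor.cast`.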
template <typename OpTy>
class ConvertCastEquivalentOp : public OpConversionPattern<OpTy> {
  using OpConversionPattern<OpTy>::OpConversionPattern;
  using OpAdaptor = typename OpTy::Adaptor;

  LogicalResult
  matchAndRewrite(OpTy op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto converter = this->getTypeConverter();
    RankedTensorType resultType = cast<RankedTensorType>(
        converter->convertType(op->getResult(0).getType()));
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType,
                                                adaptor.getSelf());
    return success();
  }
};
} // namespace

namespace {
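// Lowers `aten.grid_sampler` (bilinear interpolation path, as implemented
// here). The grid holds normalized coordinates in [-1, 1] that are mapped
// into the input's pixel space; the four neighboring input values are
// gathered (out-of-bounds taps contribute zero) and blended by the
// fractional offsets.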
class ConvertAtenGridSamplerOp : public OpConversionPattern<AtenGridSamplerOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenGridSamplerOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Location loc = op->getLoc();
    Type int64type = rewriter.getI64Type();
    Type floatType = rewriter.getF32Type();
    Value zeroIndex = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    Value oneIndex = rewriter.create<arith::ConstantIndexOp>(loc, 1);
    Value twoIndex = rewriter.create<arith::ConstantIndexOp>(loc, 2);
    Value zeroFloat = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getFloatAttr(floatType, 0.0));
    Value oneFloat = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getFloatAttr(floatType, 1.0));
    Value twoFloat = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getFloatAttr(floatType, 2.0));
    Value input = adaptor.getInput();
    auto inputType = cast<RankedTensorType>(input.getType());
    auto inputShape = inputType.getShape();
    Value innerDim0a = rewriter.create<tensor::DimOp>(loc, input, 2);
    Value innerDim1a = rewriter.create<tensor::DimOp>(loc, input, 3);
    Value innerDim0b =
        rewriter.create<arith::SubIOp>(loc, innerDim0a, oneIndex);
    Value innerDim1b =
        rewriter.create<arith::SubIOp>(loc, innerDim1a, oneIndex);
    Value innerDim0c =
        rewriter.create<arith::IndexCastOp>(loc, int64type, innerDim0b);
    Value innerDim1c =
        rewriter.create<arith::IndexCastOp>(loc, int64type, innerDim1b);
    Value innerDim0d =
        rewriter.create<arith::SIToFPOp>(loc, floatType, innerDim0c);
    Value innerDim1d =
        rewriter.create<arith::SIToFPOp>(loc, floatType, innerDim1c);
    Value innerDim0e =
        rewriter.create<arith::DivFOp>(loc, innerDim0d, twoFloat);
    Value innerDim1e =
        rewriter.create<arith::DivFOp>(loc, innerDim1d, twoFloat);
    Value grid = adaptor.getGrid();
    auto gridType = cast<RankedTensorType>(grid.getType());
    auto gridShape = gridType.getShape();
    auto gridRank = gridType.getRank();
    SmallVector<Value> extractGridOffsets0(gridRank, zeroIndex);
    SmallVector<Value> extractGridShape = getTensorSizes(rewriter, loc, grid);
    SmallVector<Value> extractGridStride(gridRank, oneIndex);
    int64_t lastGridDim = gridRank - 1;
    extractGridShape[lastGridDim] = oneIndex;
    extractGridStride[lastGridDim] = twoIndex;
    SmallVector<Value> extractGridOffsets1(gridRank, zeroIndex);
    extractGridOffsets1[lastGridDim] = oneIndex;
    SmallVector<int64_t> gridShapeExtracted(gridShape);
    gridShapeExtracted.back() = 1;
    SmallVector<int64_t> gridShapeCollapsed{gridShape[0], gridShape[1],
                                            gridShape[2]};
    auto grid0 = rewriter.create<tensor::ExtractSliceOp>(
        loc, grid, extractGridOffsets0, extractGridShape, extractGridStride);
    auto grid1 = rewriter.create<tensor::ExtractSliceOp>(
        loc, grid, extractGridOffsets1, extractGridShape, extractGridStride);
    SmallVector<ReassociationIndices> associations{ReassociationIndices{0},
                                                   ReassociationIndices{1},
                                                   ReassociationIndices{2, 3}};
    auto gridCollapsed0 =
        rewriter.create<tensor::CollapseShapeOp>(loc, grid0, associations);
    auto gridCollapsed1 =
        rewriter.create<tensor::CollapseShapeOp>(loc, grid1, associations);
    AffineMap gridMap = AffineMap::get(4, 0,
                                       {rewriter.getAffineDimExpr(0),
                                        rewriter.getAffineDimExpr(2),
                                        rewriter.getAffineDimExpr(3)},
                                       op->getContext());
    SmallVector<AffineMap> gridMaps{gridMap, gridMap,
                                    rewriter.getMultiDimIdentityMap(gridRank)};
    SmallVector<utils::IteratorType> gridIterators(
        gridRank, utils::IteratorType::parallel);
    SmallVector<int64_t> resultShape{inputShape[0], inputShape[1], gridShape[1],
                                     gridShape[2]};
    auto lambdaExtract = [](OpBuilder &b, Location loc, Value input, Value idxA,
                            Value idxB, Value idxC, Value idxD) -> Value {
      SmallVector<Value> index{idxA, idxB, idxC, idxD};
      Value result = b.create<tensor::ExtractOp>(loc, input, index);
      return result;
    };
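    // Linear interpolation between x and y by fraction d: x * (1 - d) + y * d.
    // Applied twice per output element to realize bilinear sampling.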
    auto lambdaInter = [&](OpBuilder &b, Location loc, Value x, Value y,
                           Value d) -> Value {
      Value dm = b.create<arith::SubFOp>(loc, oneFloat, d);
      Value ra = b.create<arith::MulFOp>(loc, x, dm);
      Value rb = b.create<arith::MulFOp>(loc, y, d);
      Value res = b.create<arith::AddFOp>(loc, ra, rb);
      return res;
    };
    auto resultType = getTypeConverter()
                          ->convertType(op.getResult().getType())
                          .cast<RankedTensorType>();
    SmallVector<Value> resultSize{};
    if (resultType.isDynamicDim(0))
      resultSize.push_back(rewriter.create<tensor::DimOp>(loc, input, 0));
    if (resultType.isDynamicDim(1))
      resultSize.push_back(rewriter.create<tensor::DimOp>(loc, input, 1));
    if (resultType.isDynamicDim(2))
      resultSize.push_back(rewriter.create<tensor::DimOp>(loc, grid, 1));
    if (resultType.isDynamicDim(3))
      resultSize.push_back(rewriter.create<tensor::DimOp>(loc, grid, 2));
    Value alignCorners = adaptor.getAlignCorners();
    Value resultFinal =
        rewriter.create<tensor::EmptyOp>(loc, resultType, resultSize);
    auto sGrid = rewriter.create<linalg::GenericOp>(
        loc, TypeRange{resultType}, ValueRange{gridCollapsed0, gridCollapsed1},
        ValueRange(resultFinal), gridMaps, gridIterators,
        [&](OpBuilder &b, Location loc, ValueRange args) {
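          // Map the normalized coordinate g in [-1, 1] to pixel space:
          //   align_corners == true:  (g + 1) * (size - 1) / 2
          //   align_corners == false: (g + 1) * (size - 1) / 2 + g / 2
          // (innerDim*e already holds (size - 1) / 2).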
          Value gr0 = args[1];
          Value gr1 = args[0];
          Value gr0Half = b.create<arith::DivFOp>(loc, gr0, twoFloat);
          Value gr1Half = b.create<arith::DivFOp>(loc, gr1, twoFloat);
          Value gr0HalfSelect =
              b.create<arith::SelectOp>(loc, alignCorners, zeroFloat, gr0Half);
          Value gr1HalfSelect =
              b.create<arith::SelectOp>(loc, alignCorners, zeroFloat, gr1Half);
          Value gplus0 = b.create<arith::AddFOp>(loc, gr0, oneFloat);
          Value gplus1 = b.create<arith::AddFOp>(loc, gr1, oneFloat);
          Value gPlusMul0 = b.create<arith::MulFOp>(loc, gplus0, innerDim0e);
          Value gPlusMul1 = b.create<arith::MulFOp>(loc, gplus1, innerDim1e);
          Value result0 =
              b.create<arith::AddFOp>(loc, gPlusMul0, gr0HalfSelect);
          Value result1 =
              b.create<arith::AddFOp>(loc, gPlusMul1, gr1HalfSelect);
          Value checkLowerBound0 = b.create<arith::CmpFOp>(
              loc, arith::CmpFPredicate::OLT, result0, zeroFloat);
          Value checkLowerBound1 = b.create<arith::CmpFOp>(
              loc, arith::CmpFPredicate::OLT, result1, zeroFloat);
          Value lowerOrig0 = b.create<arith::FPToSIOp>(loc, int64type, result0);
          Value lowerOrig1 = b.create<arith::FPToSIOp>(loc, int64type, result1);
          Value zeroInt =
              b.create<arith::ConstantOp>(loc, b.getIntegerAttr(int64type, 0));
          Value oneInt =
              b.create<arith::ConstantOp>(loc, b.getIntegerAttr(int64type, 1));
          Value lowerSub0 = b.create<arith::SubIOp>(loc, lowerOrig0, oneInt);
          Value lowerSub1 = b.create<arith::SubIOp>(loc, lowerOrig1, oneInt);
          Value lower0 = b.create<arith::SelectOp>(loc, checkLowerBound0,
                                                   lowerSub0, lowerOrig0);
          Value lower1 = b.create<arith::SelectOp>(loc, checkLowerBound1,
                                                   lowerSub1, lowerOrig1);
          Value lowerValid0 =
              b.create<arith::SelectOp>(loc, checkLowerBound0, zeroInt, lower0);
          Value lowerValid1 =
              b.create<arith::SelectOp>(loc, checkLowerBound1, zeroInt, lower1);
          Value upper0 =
              b.create<arith::AddIOp>(loc, int64type, lower0, oneInt);
          Value upper1 =
              b.create<arith::AddIOp>(loc, int64type, lower1, oneInt);
          Value notValidUpper0 = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::sgt, upper0, innerDim0c);
          Value notValidUpper1 = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::sgt, upper1, innerDim1c);
          Value upperValid0 =
              b.create<arith::SelectOp>(loc, notValidUpper0, lower0, upper0);
          Value upperValid1 =
              b.create<arith::SelectOp>(loc, notValidUpper1, lower1, upper1);
          Value lw0 =
              b.create<arith::IndexCastOp>(loc, b.getIndexType(), lowerValid0);
          Value lw1 =
              b.create<arith::IndexCastOp>(loc, b.getIndexType(), lowerValid1);
          Value up0 =
              b.create<arith::IndexCastOp>(loc, b.getIndexType(), upperValid0);
          Value up1 =
              b.create<arith::IndexCastOp>(loc, b.getIndexType(), upperValid1);
          Value N = b.create<linalg::IndexOp>(loc, 0);
          Value C = b.create<linalg::IndexOp>(loc, 1);
          Value result00 = lambdaExtract(b, loc, input, N, C, lw0, lw1);
          Value result00a = b.create<arith::SelectOp>(loc, checkLowerBound0,
                                                      zeroFloat, result00);
          Value result00b = b.create<arith::SelectOp>(loc, checkLowerBound1,
                                                      zeroFloat, result00a);
          Value result01 = lambdaExtract(b, loc, input, N, C, lw0, up1);
          Value result01a = b.create<arith::SelectOp>(loc, notValidUpper1,
                                                      zeroFloat, result01);
          Value result01b = b.create<arith::SelectOp>(loc, checkLowerBound0,
                                                      zeroFloat, result01a);
          Value result10 = lambdaExtract(b, loc, input, N, C, up0, lw1);
          Value result10a = b.create<arith::SelectOp>(loc, notValidUpper0,
                                                      zeroFloat, result10);
          Value result10b = b.create<arith::SelectOp>(loc, checkLowerBound1,
                                                      zeroFloat, result10a);
          Value result11 = lambdaExtract(b, loc, input, N, C, up0, up1);
          Value result11a = b.create<arith::SelectOp>(loc, notValidUpper0,
                                                      zeroFloat, result11);
          Value result11b = b.create<arith::SelectOp>(loc, notValidUpper1,
                                                      zeroFloat, result11a);
          Value lw0a = b.create<arith::SIToFPOp>(loc, floatType, lower0);
          Value lw1a = b.create<arith::SIToFPOp>(loc, floatType, lower1);
          Value d1 = b.create<arith::SubFOp>(loc, result0, lw0a);
          Value d0 = b.create<arith::SubFOp>(loc, result1, lw1a);
          Value resultScaled0 = lambdaInter(b, loc, result00b, result01b, d0);
          Value resultScaled1 = lambdaInter(b, loc, result10b, result11b, d0);
          Value resultScaled =
              lambdaInter(b, loc, resultScaled0, resultScaled1, d1);
          b.create<linalg::YieldOp>(loc, resultScaled);
        });
    rewriter.replaceOp(op, sGrid.getResults());
    return success();
  }
};
} // namespace

void mlir::torch::torch_to_linalg::populateUncategorizedPatternsAndLegality(
    TypeConverter &typeConverter, RewritePatternSet &patterns,
    ConversionTarget &target) {
  MLIRContext *context = patterns.getContext();
  target.addIllegalOp<
      AtenTanOp, AtenTanhOp, AtenSinhOp, AtenCoshOp, AtenAtanhOp, AtenAcoshOp,
      AtenAsinOp, AtenAsinhOp, AtenReluOp, AtenGeluOp, AtenGeluBackwardOp,
      AtenAddTensorOp, AtenMulTensorOp, AtenDivTensorOp, AtenDivTensorModeOp,
      AtenDivScalarModeOp, AtenSubTensorOp, AtenLerpTensorOp, AtenSigmoidOp,
      AtenMinimumOp, AtenAtan2Op, AtenMaximumOp, AtenToDtypeOp, AtenClampOp,
      AtenClampTensorOp, AtenRsubScalarOp, AtenLogOp, AtenErfOp, AtenSqrtOp,
      AtenFloorOp, AtenCeilOp, AtenPreluOp, AtenPowScalarOp,
      AtenPowTensorScalarOp, AtenPowTensorTensorOp, AtenLog2Op, AtenLog10Op,
      AtenLog1pOp, AtenRsqrtOp, AtenAbsOp, AtenReciprocalOp,
      AtenBitwiseAndTensorOp, AtenBitwiseAndScalarOp, AtenBitwiseOrTensorOp,
      AtenBitwiseXorTensorOp, AtenBitwiseLeftShiftTensorOp,
      AtenBitwiseRightShiftTensorOp, AtenGtScalarOp, AtenGeScalarOp,
      AtenEqScalarOp, AtenLtScalarOp, AtenLeScalarOp, AtenWhereSelfOp,
      AtenGtTensorOp, AtenGeTensorOp, AtenEqTensorOp, AtenNeTensorOp,
      AtenLtTensorOp, AtenLeTensorOp, AtenThresholdOp, AtenThresholdBackwardOp,
      AtenHardtanhBackwardOp, AtenCloneOp, AtenSinOp, AtenCosOp, AtenNeScalarOp,
      AtenMaskedFillTensorOp, AtenLogicalOrOp, AtenLogicalAndOp, AtenAtanOp,
      AtenAcosOp, AtenLogicalXorOp, AtenLogicalNotOp, AtenIsinfOp, AtenTriuOp,
      AtenTrilOp, AtenRemainderScalarOp, AtenFmodTensorOp,
      AtenRemainderTensorOp, AtenBitwiseNotOp, AtenRoundOp, AtenFillScalarOp,
      AtenFillTensorOp, AtenRealOp, AtenImagOp, AtenDequantizeSelfOp,
      AtenDequantizeTensorOp, AtenQuantizePerTensorOp>();
  patterns.add<ConvertElementwiseOp>(typeConverter, context);
  target.addIllegalOp<AtenNllLossForwardOp>();
  patterns.add<ConvertAtenDetachOp>(typeConverter, context);
  target.addIllegalOp<AtenDetachOp>();
  patterns.add<ConvertAtenNllLossForwardOp>(typeConverter, context);
  target.addIllegalOp<AtenBatchNormOp>();
  patterns.add<ConvertAtenBatchNormOp>(typeConverter, context);
  target.addIllegalOp<AtenLogitOp>();
  patterns.add<ConvertLogitOp>(typeConverter, context);
  target.addIllegalOp<PrimsCollapseOp>();
  patterns.add<ConvertPrimsCollapseOp>(typeConverter, context);
  target.addIllegalOp<PrimsSplitDimOp>();
  patterns.add<ConvertPrimsSplitDimOp>(typeConverter, context);
  target.addIllegalOp<AtenNllLossBackwardOp>();
  patterns.add<ConvertAtenNllLossBackwardOp>(typeConverter, context);
  patterns.add<ConvertTensorStaticInfoCastOp>(typeConverter, context);
  target.addIllegalOp<TensorStaticInfoCastOp>();
  patterns.add<ConvertAtenIntReprOp>(typeConverter, context);
  target.addIllegalOp<AtenIntReprOp>();
  patterns.add<ConvertCastEquivalentOp<Aten_MakePerChannelQuantizedTensorOp>>(
      typeConverter, context);
  target.addIllegalOp<Aten_MakePerChannelQuantizedTensorOp>();
  patterns.add<ConvertCastEquivalentOp<Aten_MakePerTensorQuantizedTensorOp>>(
      typeConverter, context);
  target.addIllegalOp<Aten_MakePerTensorQuantizedTensorOp>();
  patterns.add<ConvertDequantizePerChannel>(typeConverter, context);
  target.addIllegalOp<AtenGridSamplerOp>();
  patterns.add<ConvertAtenGridSamplerOp>(typeConverter, context);
}