//===----------------------------------------------------------------------===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "torch-mlir/Dialect/Torch/Utils/Utils.h"

#include "mlir/IR/BuiltinDialect.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"

using namespace mlir;
using namespace mlir::torch;
using namespace mlir::torch::Torch;

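// Converts a possibly-negative `dim` into a non-negative index following the
// PyTorch convention, e.g. toPositiveDim(-1, /*inputRank=*/4) == 3. The
// result is not validated; pair with isValidDim below for that.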
int64_t Torch::toPositiveDim(int64_t dim, int64_t inputRank) {
  return dim >= 0 ? dim : dim + inputRank;
}
bool Torch::isValidDim(int64_t dim, int64_t inputRank) {
  return dim >= 0 && dim < inputRank;
}
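// Matches `v` against a constant integer and normalizes it as an index into a
// list of `length` elements, returning std::nullopt if `v` is not a constant
// int or the index is out of range.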
std::optional<int64_t>
Torch::matchLegalConstantIndexIntoListOfSize(Value v, int64_t length) {
  int64_t dim;
  if (!matchPattern(v, m_TorchConstantInt(&dim)))
    return std::nullopt;
  dim = toPositiveDim(dim, length);
  if (!isValidDim(dim, length))
    return std::nullopt;
  return dim;
}
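// Collects the elements of a statically known `prim.ListConstruct` into
// `elems`. Returns false (leaving `elems` untouched) when `v` is not produced
// by such an op.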
bool Torch::getListConstructElements(Value v, SmallVectorImpl<Value> &elems) {
  auto listConstruct = v.getDefiningOp<PrimListConstructOp>();
  if (!listConstruct)
    return false;
  elems = llvm::to_vector<4>(listConstruct.getElements());
  return true;
}
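// Maps a builtin MLIR element type to the corresponding torch ScalarType enum
// value (e.g. f32 -> Float, si64 -> Long); aborts on unhandled types.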
torch_upstream::ScalarType Torch::getScalarTypeForType(Type type) {
  if (type.isa<Float32Type>())
    return torch_upstream::ScalarType::Float;
  if (type.isa<Float64Type>())
    return torch_upstream::ScalarType::Double;
  if (type.isSignedInteger(64))
    return torch_upstream::ScalarType::Long;
  if (type.isSignedInteger(32))
    return torch_upstream::ScalarType::Int;
  if (type.isSignlessInteger(1))
    return torch_upstream::ScalarType::Bool;
  if (type.isBF16())
    return torch_upstream::ScalarType::BFloat16;
  if (type.isF16())
    return torch_upstream::ScalarType::Half;
  if (type.isUnsignedInteger(8))
    return torch_upstream::ScalarType::Byte;
  if (type.isSignedInteger(8))
    return torch_upstream::ScalarType::Char;
  if (type.isa<ComplexType>()) {
    mlir::Type complexElemType = type.cast<ComplexType>().getElementType();
    if (complexElemType.isF32())
      return torch_upstream::ScalarType::ComplexHalf;
    if (complexElemType.isF64())
      return torch_upstream::ScalarType::ComplexFloat;
    if (complexElemType.isF128())
      return torch_upstream::ScalarType::ComplexDouble;
  }
  llvm::report_fatal_error("unhandled type for getScalarTypeForType");
}
Type Torch::getTypeForTorchType(
    MLIRContext *context, Type type,
    mlir::IntegerType::SignednessSemantics signedness) {
  if (type.isa<Torch::IntType>())
    return IntegerType::get(context, 64, signedness);
  if (type.isa<Torch::FloatType>())
    return Float64Type::get(context);
  llvm::report_fatal_error("unhandled type for getTypeForTorchType");
}
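// Maps a torch ScalarType back to a builtin MLIR type (roughly the inverse of
// getScalarTypeForType above, with integer signedness made explicit). Returns
// failure() for ScalarType::Undefined; aborts on other unhandled values.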
FailureOr<Type>
Torch::getTypeForScalarType(MLIRContext *context,
                            torch_upstream::ScalarType dtypeInt,
                            mlir::IntegerType::SignednessSemantics signedness) {
  switch (dtypeInt) {
  case torch_upstream::ScalarType::Float:
    return Float32Type::get(context);
  case torch_upstream::ScalarType::Double:
    return Float64Type::get(context);
  case torch_upstream::ScalarType::Long:
    return IntegerType::get(context, 64, signedness);
  case torch_upstream::ScalarType::Int:
    return IntegerType::get(context, 32, signedness);
  case torch_upstream::ScalarType::Bool:
    return IntegerType::get(context, 1);
  case torch_upstream::ScalarType::BFloat16:
    return mlir::FloatType::getBF16(context);
  case torch_upstream::ScalarType::Half:
    return mlir::FloatType::getF16(context);
  case torch_upstream::ScalarType::Byte:
    return mlir::IntegerType::get(context, 8, mlir::IntegerType::Unsigned);
  case torch_upstream::ScalarType::Char:
    return mlir::IntegerType::get(context, 8, signedness);
  case torch_upstream::ScalarType::ComplexHalf:
    return mlir::ComplexType::get(Float32Type::get(context));
  case torch_upstream::ScalarType::ComplexFloat:
    return mlir::ComplexType::get(Float64Type::get(context));
  case torch_upstream::ScalarType::ComplexDouble:
    return mlir::ComplexType::get(Float128Type::get(context));
  case torch_upstream::ScalarType::Undefined:
    return failure();
  default:
    llvm::report_fatal_error("unhandled type for getTypeForScalarType");
  }
}
FailureOr<Type>
Torch::getTorchTypeForScalarType(MLIRContext *context,
                                 torch_upstream::ScalarType dtypeInt) {
  switch (dtypeInt) {
  case torch_upstream::ScalarType::Double:
    return Torch::FloatType::get(context);
  case torch_upstream::ScalarType::Long:
    return Torch::IntType::get(context);
  case torch_upstream::ScalarType::Undefined:
  default:
    return failure();
  }
}
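// Dtype a bare Torch scalar receives when it is promoted to a tensor: floats
// get the default tensor dtype (taken to be the initial float32 here), ints
// become si64, and bools become i1.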
Type Torch::getDefaultDtypeForTorchScalar(Type type) {
  MLIRContext *context = type.getContext();
  if (type.isa<Torch::FloatType>()) {
    // For now, use float32 which is the initial default dtype returned by
    // `torch.get_default_dtype`.
    return Float32Type::get(context);
  }
  if (type.isa<Torch::IntType>())
    return IntegerType::get(context, 64, IntegerType::Signed);
  if (type.isa<Torch::BoolType>())
    return IntegerType::get(context, 1);
  llvm_unreachable(
      "getDefaultDtypeForTorchScalar called on an unsupported type");
}
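// Builtin type of the Torch scalar value itself: a !torch.float is a Python
// float, i.e. a 64-bit double, so it maps to f64 rather than the f32 default
// dtype used above.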
Type Torch::getBuiltInTypeForTorchScalar(Type type) {
  MLIRContext *context = type.getContext();
  if (type.isa<Torch::FloatType>())
    return Float64Type::get(context);
  if (type.isa<Torch::IntType>())
    return IntegerType::get(context, 64, IntegerType::Signed);
  if (type.isa<Torch::BoolType>())
    return IntegerType::get(context, 1);
  llvm_unreachable(
      "getBuiltInTypeForTorchScalar called on an unsupported type");
}
Value Torch::getDtypeIntValueForType(PatternRewriter &rewriter, Location loc,
                                      Type dtype) {
  int intType = (int)getScalarTypeForType(dtype);
  return rewriter.create<ConstantIntOp>(loc,
                                        rewriter.getI64IntegerAttr(intType));
}
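// A minimal usage sketch (hypothetical caller): converting `input` to si64
// inside a rewrite pattern would look like
//   Value asI64 = convertTensorToDtype(
//       rewriter, loc, input,
//       IntegerType::get(rewriter.getContext(), 64, IntegerType::Signed));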
// Helper to convert a tensor to a specific scalar type.
Value Torch::convertTensorToDtype(PatternRewriter &rewriter, Location loc,
                                  Value input, Type dtype) {
  BaseTensorType origType = input.getType().cast<BaseTensorType>();
  Type newType = origType.getWithSizesAndDtype(origType.getSizes(), dtype);
  // `convertIntVal` holds the integer encoding of `dtype`, which is what the
  // aten.to.dtype op expects.
  Value convertIntVal = getDtypeIntValueForType(rewriter, loc, dtype);
  Value falseVal = rewriter.create<ConstantBoolOp>(loc, false);
  Value noneVal = rewriter.create<ConstantNoneOp>(loc);
  Value converted = rewriter.create<AtenToDtypeOp>(
      loc, newType, input, convertIntVal, falseVal, falseVal, noneVal);
  return converted;
}
bool Torch::isBuiltInType(Type type) {
  return isa<BuiltinDialect>(type.getDialect());
}
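// Returns the rank of `tensor` if its type carries shape information, and
// std::nullopt otherwise.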
std::optional<unsigned> Torch::getTensorRank(Value tensor) {
  BaseTensorType tensorType = tensor.getType().cast<BaseTensorType>();
  if (!tensorType.hasSizes())
    return std::nullopt;
  return tensorType.getSizes().size();
}
bool Torch::isViewLikeOp(Operation *op) {
  // AtenContiguousOp might return a view, so this is conservatively
  // correct. We could potentially be more precise and identify the cases
  // that it does not return a view and treat those as having value
  // semantics.
  return isa<AtenBroadcastToOp, AtenContiguousOp, AtenDetachOp, AtenExpandAsOp,
             AtenExpandOp, AtenFlattenUsingIntsOp, AtenPermuteOp, AtenReshapeOp,
             Aten_ReshapeAliasOp, AtenSelectIntOp, AtenSliceTensorOp,
             AtenSqueezeDimOp, AtenSqueezeOp, AtenTOp, AtenToDtypeOp,
             AtenTransposeIntOp, AtenUnsqueezeOp, AtenViewOp,
             TensorStaticInfoCastOp, AtenToDtypeLayoutOp, AtenNumpyTOp,
             AtenNarrowOp, AtenNarrowTensorOp, AtenToDeviceOp, PrimsSqueezeOp,
             AtenMovedimIntOp, PrimsViewOfOp, AtenRealOp, AtenImagOp,
             AtenViewAsComplexOp>(op);
}
Value Torch::getConstantWithGivenDtypeAndValue(PatternRewriter &rewriter,
                                               Location loc, float value,
                                               Type dtype) {
  // Creating constants satisfying backend contract.
  if (dtype.isInteger(64) || dtype.isInteger(32) || dtype.isInteger(8) ||
      dtype.isInteger(1))
    return rewriter.create<ConstantIntOp>(
        loc, rewriter.getI64IntegerAttr((int64_t)value));
  if (dtype.isF64() || dtype.isF32() || dtype.isF16() || dtype.isBF16())
    return rewriter.create<ConstantFloatOp>(loc,
                                            rewriter.getF64FloatAttr(value));
  llvm::report_fatal_error(
      "unhandled type for getConstantWithGivenDtypeAndValue");
}
// Return the number of elements of a tensor if the shape is static; otherwise,
// return -1.
int64_t Torch::getNumberOfElements(RankedTensorType inputType) {
  if (!inputType.hasStaticShape())
    return -1;
  SmallVector<int64_t> inputShape =
      makeShapeTorchCompatible(inputType.getShape());
  int64_t numel = 1;
  for (int64_t i = 0; i < inputType.getRank(); i++)
    numel *= inputShape[i];
  return numel;
}
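// Torch types mark dynamic dimensions with `kUnknownSize` (-1), while core
// MLIR uses `ShapedType::kDynamic`. The two helpers below translate a shape
// between these conventions, one direction each.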
SmallVector<int64_t> Torch::makeShapeLLVMCompatible(ArrayRef<int64_t> shape) {
  SmallVector<int64_t> updatedShape(shape);
  int64_t kDynamic = ShapedType::kDynamic;
  for (unsigned i = 0; i < shape.size(); i++) {
    assert(shape[i] >= 0 || shape[i] == kUnknownSize);
    if (shape[i] == kUnknownSize)
      updatedShape[i] = kDynamic;
  }
  return updatedShape;
}
SmallVector<int64_t> Torch::makeShapeTorchCompatible(ArrayRef<int64_t> shape) {
  SmallVector<int64_t> updatedShape(shape);
  int64_t kDynamic = ShapedType::kDynamic;
  for (unsigned i = 0; i < shape.size(); i++) {
    assert(shape[i] >= 0 || shape[i] == kDynamic);
    if (shape[i] == kDynamic)
      updatedShape[i] = kUnknownSize;
  }
  return updatedShape;
}
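// For example (hypothetical shapes): squeezing dim 1 of a
// !torch.vtensor<[2,1,3],f32> yields a !torch.vtensor<[2,3],f32>, guarded by
// a runtime assert that the squeezed dimension actually has size 1.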
// Helper function to squeeze the input tensor at given dim.
// Return the squeezed tensor or failure.
FailureOr<Value> Torch::squeezeTensor(PatternRewriter &rewriter, Operation *op,
                                      Location loc, int64_t dim, Value input) {
  BaseTensorType inputType = input.getType().cast<BaseTensorType>();
  if (!inputType.hasSizes()) {
    return rewriter.notifyMatchFailure(loc, "input tensor must have size");
  }
  SmallVector<int64_t> inputShape{inputType.getSizes()};
  unsigned inputRank = inputShape.size();
  dim = toPositiveDim(dim, inputRank);
  if (!isValidDim(dim, inputRank)) {
    return rewriter.notifyMatchFailure(
        op, "dimension to be squeezed is an invalid dim");
  }
  inputShape.erase(inputShape.begin() + dim);
  Type squeezedType =
      inputType.getWithSizesAndDtype(inputShape, inputType.getOptionalDtype());

  Value cstDim = rewriter.create<Torch::ConstantIntOp>(
      loc, rewriter.getI64IntegerAttr(dim));
  // Check at runtime that the dimension being squeezed has size 1.
  Value cstOne =
      rewriter.create<Torch::ConstantIntOp>(loc, rewriter.getI64IntegerAttr(1));
  Value dimSize = rewriter.create<AtenSizeIntOp>(loc, input, cstDim);
  Value cmp = rewriter.create<Torch::AtenEqIntOp>(loc, dimSize, cstOne);
  rewriter.create<Torch::RuntimeAssertOp>(
      loc, cmp,
      "squeeze operation possible for dim only when input_shape[dim] == 1.");

  Value result =
      rewriter.create<AtenSqueezeDimOp>(loc, squeezedType, input, cstDim);
  return result;
}
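// For example (hypothetical shapes): unsqueezing a !torch.vtensor<[2,3],f32>
// at dim 0 yields a !torch.vtensor<[1,2,3],f32>; when `dim` is not a constant
// the result shape is left fully dynamic.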
// Helper function to unsqueeze the input tensor at given dim.
// Return the unsqueezed tensor or failure.
FailureOr<Value> Torch::unsqueezeTensor(PatternRewriter &rewriter,
                                        Operation *op, Value input, Value dim) {
  BaseTensorType inputType = input.getType().cast<BaseTensorType>();
  if (!inputType.hasSizes()) {
    return rewriter.notifyMatchFailure(op, "input tensor must have size");
  }

  SmallVector<int64_t> unsqueezedShape;
  ArrayRef<int64_t> inputShape = inputType.getSizes();
  // The result has one more dimension than `input`, hence the +1.
  int64_t unsqueezedRank = inputShape.size() + 1;
  int64_t dimInt = 0;
  if (matchPattern(dim, m_TorchConstantInt(&dimInt))) {
    dimInt = toPositiveDim(dimInt, unsqueezedRank);
    if (!isValidDim(dimInt, unsqueezedRank)) {
      return rewriter.notifyMatchFailure(op, "dim is not a valid dim");
    }
    unsqueezedShape.append(inputShape.begin(), inputShape.end());
    unsqueezedShape.insert(unsqueezedShape.begin() + dimInt, 1);
  } else {
    unsqueezedShape.resize(unsqueezedRank, kUnknownSize);
  }
  Type unsqueezedType = inputType.getWithSizesAndDtype(
      unsqueezedShape, inputType.getOptionalDtype());
  Value unsqueezed = rewriter.create<AtenUnsqueezeOp>(
      op->getLoc(), unsqueezedType, input, dim);
  return unsqueezed;
}