Implement lowering of aten.atleast_2d (#3546)

This operator is needed to implement aten.vstack, which will be
submitted in a subsequent PR.
pkapris-syrmia 2024-08-14 15:22:31 +02:00 committed by GitHub
parent da877a781e
commit 23ec5399e5
8 changed files with 185 additions and 0 deletions
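For context, these are the eager-mode semantics this lowering must reproduce (an illustrative PyTorch snippet, not part of the diff): rank-0 and rank-1 inputs are promoted to rank 2 by prepending unit dimensions, while higher-rank inputs pass through unchanged.

import torch

print(torch.atleast_2d(torch.tensor(1.0)).shape)  # torch.Size([1, 1])
print(torch.atleast_2d(torch.rand(4)).shape)      # torch.Size([1, 4])
print(torch.atleast_2d(torch.rand(4, 4)).shape)   # torch.Size([4, 4]), unchanged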


@@ -10277,6 +10277,29 @@ def Torch_AtenAtleast1dOp : Torch_Op<"aten.atleast_1d", [
  }];
}

def Torch_AtenAtleast2dOp : Torch_Op<"aten.atleast_2d", [
    AllowsTypeRefinement,
    HasValueSemantics,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::atleast_2d : (Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenAtleast2dOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 1, 1);
    }
    void AtenAtleast2dOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 1, 1);
    }
  }];
}

def Torch_AtenEinsumOp : Torch_Op<"aten.einsum", [
    AllowsTypeRefinement,
    HasValueSemantics,


@@ -10546,6 +10546,28 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
"    }\n"
"    return %2 : !torch.list<int>\n"
"  }\n"
"  func.func @\"__torch_mlir_shape_fn.aten.atleast_2d\"(%arg0: !torch.list<int>) -> !torch.list<int> {\n"
"    %int0 = torch.constant.int 0\n"
"    %int1 = torch.constant.int 1\n"
"    %0 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
"    %1 = torch.aten.eq.int %0, %int0 : !torch.int, !torch.int -> !torch.bool\n"
"    %2 = torch.prim.If %1 -> (!torch.list<int>) {\n"
"      %3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>\n"
"      torch.prim.If.yield %3 : !torch.list<int>\n"
"    } else {\n"
"      %3 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
"      %4 = torch.aten.eq.int %3, %int1 : !torch.int, !torch.int -> !torch.bool\n"
"      %5 = torch.prim.If %4 -> (!torch.list<int>) {\n"
"        %6 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int\n"
"        %7 = torch.prim.ListConstruct %int1, %6 : (!torch.int, !torch.int) -> !torch.list<int>\n"
"        torch.prim.If.yield %7 : !torch.list<int>\n"
"      } else {\n"
"        torch.prim.If.yield %arg0 : !torch.list<int>\n"
"      }\n"
"      torch.prim.If.yield %5 : !torch.list<int>\n"
"    }\n"
"    return %2 : !torch.list<int>\n"
"  }\n"
"  func.func @\"__torch_mlir_shape_fn.aten.stack\"(%arg0: !torch.list<list<int>>, %arg1: !torch.int) -> !torch.list<int> {\n"
"    %0 = call @__torch__.torch.jit._shape_functions.stack(%arg0, %arg1) : (!torch.list<list<int>>, !torch.int) -> !torch.list<int>\n"
"    return %0 : !torch.list<int>\n"
@@ -15044,6 +15066,10 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
"    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
"    return %0#1 : !torch.int\n"
"  }\n"
"  func.func @\"__torch_mlir_dtype_fn.aten.atleast_2d\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
"    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
"    return %0#1 : !torch.int\n"
"  }\n"
"  func.func @\"__torch_mlir_dtype_fn.aten.einsum\"(%arg0: !torch.str, %arg1: !torch.list<tuple<int, int>>, %arg2: !torch.optional<list<int>>) -> !torch.int {\n"
"    %true = torch.constant.bool true\n"
"    %none = torch.constant.none\n"


@@ -1799,6 +1799,53 @@ public:
};
} // namespace
namespace {
// Decompose aten.atleast_2d into aten.atleast_1d followed by aten.unsqueeze
// (promote the input to rank 1, then prepend a unit dimension). See the
// PyTorch reference implementation:
// https://github.com/pytorch/pytorch/blob/9a8ab778d34bd24c5caceb340837483decc4c311/torch/_refs/__init__.py#L2604
// def atleast_2d(
//     arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType
// ) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]:
//     """Reference implementation of :func:`torch.atleast_2d`."""
//     if not args and isinstance(arg, collections.abc.Sequence):
//         args_ = arg
//     else:
//         assert not isinstance(arg, collections.abc.Sequence)
//         args_ = (arg,) + args
//     unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0)
//     res = tuple(a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_)
//     return res if len(res) > 1 else res[0]
class DecomposeAtenAtleast2dOp : public OpRewritePattern<AtenAtleast2dOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(AtenAtleast2dOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value input = op.getSelf();
    Type opType = op.getType();
    auto inputType = cast<BaseTensorType>(input.getType());
    SmallVector<int64_t> inputShape(inputType.getSizes());
    // Inputs of rank >= 2 already satisfy atleast_2d; forward them unchanged.
    if (inputShape.size() >= 2) {
      rewriter.replaceOp(op, input);
      return success();
    }
    // Promote to rank 1, then prepend a unit dimension with unsqueeze:
    // [] -> [1, 1] and [n] -> [1, n].
    auto atleast1dResShape =
        inputShape.empty() ? SmallVector<int64_t, 1>{1} : inputShape;
    auto atleast1dResType = rewriter.getType<ValueTensorType>(
        atleast1dResShape, inputType.getOptionalDtype());
    auto atleast1dRes =
        rewriter.create<AtenAtleast1dOp>(loc, atleast1dResType, input);
    Value zero = rewriter.create<Torch::ConstantIntOp>(
        loc, rewriter.getI64IntegerAttr(0));
    rewriter.replaceOpWithNewOp<AtenUnsqueezeOp>(op, opType, atleast1dRes,
                                                 zero);
    return success();
  }
};
} // namespace
namespace {
// Decompose AtenEinsumOp to AtenMatmulOp, and supports possible reduce
// operation and permute operation. Currently, this pass doesn't support
@@ -9429,6 +9476,7 @@ public:
    addPatternIfTargetOpIsIllegal<DecomposeAtenRreluOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenCeluOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenAtleast1dOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenAtleast2dOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenEinsumOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenTraceOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenHardswishOp>(patterns);
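The DecomposeAtenAtleast2dOp pattern registered above can be cross-checked against eager PyTorch. A minimal sketch (illustration only; decomposed_atleast_2d is a hypothetical helper, not part of the commit):

import torch

def decomposed_atleast_2d(x: torch.Tensor) -> torch.Tensor:
    # Rank >= 2: the pattern forwards the input unchanged.
    if x.dim() >= 2:
        return x
    # Rank 0 or 1: atleast_1d, then unsqueeze at dim 0, mirroring the
    # AtenAtleast1dOp + AtenUnsqueezeOp sequence emitted by the pattern.
    return torch.atleast_1d(x).unsqueeze(0)

for shape in [(), (4,), (2, 3)]:
    x = torch.rand(shape)
    assert torch.equal(decomposed_atleast_2d(x), torch.atleast_2d(x))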


@@ -396,6 +396,7 @@ static void markDecomposedOpsAsIllegal(MLIRContext *context,
  target.addIllegalOp<Aten_SoftmaxBackwardDataOp>();
  target.addIllegalOp<AtenTanhBackwardOp>();
  target.addIllegalOp<AtenAtleast1dOp>();
  target.addIllegalOp<AtenAtleast2dOp>();
  target.addIllegalOp<AtenEinsumOp>();
  target.addIllegalOp<AtenTraceOp>();
  target.addIllegalOp<AtenAddmmOp>();


@@ -879,6 +879,9 @@ STABLEHLO_PASS_SET = {
    "TypeConversionUint8ToF32Module_basic",
    "Atleast1dModule0dInput_basic",
    "Atleast1dModule1dInput_basic",
    "Atleast2dModule0dInput_basic",
    "Atleast2dModule1dInput_basic",
    "Atleast2dModule2dInput_basic",
    "AtenLinear1D_basic",
    "AtenLinear2D_basic",
    "AtenLinear3DBias_basic",
@@ -1576,6 +1579,9 @@ TOSA_PASS_SET = {
    "TensorSplitSections_ListUnpackModule_basic",
    "Atleast1dModule0dInput_basic",
    "Atleast1dModule1dInput_basic",
    "Atleast2dModule0dInput_basic",
    "Atleast2dModule1dInput_basic",
    "Atleast2dModule2dInput_basic",
    "AtenLinear2D_basic",
    "AtenLinear3DBias_basic",
    "ElementwiseAddScalar_NumToTensorFloat_Module_basic",
@@ -2075,6 +2081,9 @@ MAKE_FX_TOSA_PASS_SET = (
    "AtenLinearVecMatBias_basic",
    "Atleast1dModule0dInput_basic",
    "Atleast1dModule1dInput_basic",
    "Atleast2dModule0dInput_basic",
    "Atleast2dModule1dInput_basic",
    "Atleast2dModule2dInput_basic",
    "MaxPool1dEmptyStrideStaticModule_basic",
    "MaxPool1dStaticCeilModeTrueModule_basic",
    "MaxPool1dStaticModule_basic",


@@ -2121,6 +2121,15 @@ def aten〇atleast_1d〡shape(self: List[int]) -> List[int]:
    else:
        return self

def aten〇atleast_2d〡shape(self: List[int]) -> List[int]:
    if len(self) == 0:
        return [1, 1]
    elif len(self) == 1:
        x = self[0]
        return [1, x]
    else:
        return self

def aten〇stack〡shape(tensors: List[List[int]], dim: int = 0) -> List[int]:
    return upstream_shape_functions.stack(tensors, dim)
@@ -5265,6 +5274,11 @@ def aten〇atleast_1d〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
    self_rank, self_dtype = self_rank_dtype
    return self_dtype

@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1))
def aten〇atleast_2d〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
    self_rank, self_dtype = self_rank_dtype
    return self_dtype

@check_dtype_function(
    [Invocation("i,j->ij", [TensorOfShape(1, dtype=torch.float32),
                            TensorOfShape(1, dtype=torch.int32)]),])
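As a quick sanity check of the shape transfer function added above, the same logic restated as a standalone sketch (hypothetical name, for illustration only):

def atleast_2d_shape(self):
    # Mirrors aten〇atleast_2d〡shape: [] -> [1, 1], [n] -> [1, n], else unchanged.
    if len(self) == 0:
        return [1, 1]
    elif len(self) == 1:
        return [1, self[0]]
    return self

assert atleast_2d_shape([]) == [1, 1]
assert atleast_2d_shape([4]) == [1, 4]
assert atleast_2d_shape([4, 4]) == [4, 4]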


@@ -795,6 +795,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
    emit("aten::argmin : (Tensor, int?, bool) -> (Tensor)")
    emit("aten::one_hot : (Tensor, int) -> (Tensor)")
    emit("aten::atleast_1d : (Tensor) -> (Tensor)")
    emit("aten::atleast_2d : (Tensor) -> (Tensor)")
    emit("aten::einsum : (str, Tensor[], int[]?) -> (Tensor)")
    emit("aten::trace : (Tensor) -> (Tensor)")
    emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)")


@@ -1525,3 +1525,66 @@ class Atleast1dModule1dInput(torch.nn.Module):
@register_test_case(module_factory=lambda: Atleast1dModule1dInput())
def Atleast1dModule1dInput_basic(module, tu: TestUtils):
    module.forward(tu.rand(4))


# ==============================================================================


class Atleast2dModule0dInput(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args(
        [
            None,
            ([], torch.float32, True),
        ]
    )
    def forward(self, x):
        return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule0dInput())
def Atleast2dModule0dInput_basic(module, tu: TestUtils):
    module.forward(tu.rand())


class Atleast2dModule1dInput(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args(
        [
            None,
            ([4], torch.float32, True),
        ]
    )
    def forward(self, x):
        return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule1dInput())
def Atleast2dModule1dInput_basic(module, tu: TestUtils):
    module.forward(tu.rand(4))


class Atleast2dModule2dInput(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args(
        [
            None,
            ([4, 4], torch.float32, True),
        ]
    )
    def forward(self, x):
        return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule2dInput())
def Atleast2dModule2dInput_basic(module, tu: TestUtils):
    module.forward(tu.rand(4, 4))