[Torch Op] Add unbind.int support with ListUnpack (#2058)

* add unbind int

* reformat

* use unpack canonicalize

* address comments

* Empty commit, trigger test

* add ltc blacklist

* clean up

* address comments

* check permute list

* erase in recompose

---------

Co-authored-by: zhekun.zhang <zhekun.zhang@bytedance.com>
Zhekun Zhang 2023-05-18 19:07:58 -07:00 committed by GitHub
commit aa97c8383e (parent 1333674905)
6 changed files with 139 additions and 1 deletion


@@ -7,6 +7,9 @@ blacklist:
- index_put # Error: TODO not sure if there are other valid types to handle here
- index_put_ # Error: TODO not sure if there are other valid types to handle here
# Ops with list of tensors output
- unbind.int
# Additional ops which autogen is supported for but don't compile yet
- _convolution
- detach


@@ -259,6 +259,10 @@ TORCHDYNAMO_XFAIL_SET = {
    "AtenComplexImagModule_basic",
    "AtenComplexRealModule_basic",
    # END tests failing due to: complex floating point ops
    # ERROR: Exception: Unsupported: return type List[Tensor] in schema for aten.unbind.int
    "UnbindIntListUnpack_Module_basic",
    "UnbindIntGetItem_Module_basic",
}
TORCHDYNAMO_CRASHING_SET = {
@@ -722,6 +726,8 @@ STABLEHLO_PASS_SET = {
    "PrimsViewOfModule_basic",
    "PrimsViewOfZeroRankModule_basic",
    "AtenComplex64Module_basic",
    "UnbindIntListUnpack_Module_basic",
    "UnbindIntGetItem_Module_basic",
}
# Write the TOSA set as a "passing" set as it is very early in development
@@ -1001,6 +1007,8 @@ TOSA_PASS_SET = {
    "PrimsViewOfModule_basic",
    "PrimsViewOfZeroRankModule_basic",
    "DetachModule_basic",
    "UnbindIntListUnpack_Module_basic",
    "UnbindIntGetItem_Module_basic",
    "TensorsConcatStaticModule_basic",
    "TensorsConcatNegativeDimStaticModule_basic",
    "AtenComplex64Module_basic",
@@ -1182,5 +1190,7 @@ LTC_XFAIL_SET = {
    "VarMeanDimBiasedModule_basic",
    "AtenComplexImagModule_basic",
    "AtenComplexRealModule_basic",
-   "AtenComplexViewModule_basic"
+   "AtenComplexViewModule_basic",
+   "UnbindIntListUnpack_Module_basic",
+   "UnbindIntGetItem_Module_basic",
}


@@ -9521,6 +9521,29 @@ def Torch_AtenSortOp : Torch_Op<"aten.sort", [
  }];
}
def Torch_AtenUnbindIntOp : Torch_Op<"aten.unbind.int", [
    AllowsTypeRefinement,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::unbind.int : (Tensor, int) -> (Tensor[])`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    Torch_IntType:$dim
  );
  let results = (outs
    AnyTorchListOfTensorType:$result
  );
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenUnbindIntOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 2, 1);
    }
    void AtenUnbindIntOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 2, 1);
    }
  }];
}
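For reference, the generated op keeps the default torch-op assembly format (two operands, one list result). A minimal sketch of how it might print in IR, assuming a 2x3x4 input unbound along dimension 0 (value names and exact type spellings are illustrative):

    %int0 = torch.constant.int 0
    %list = torch.aten.unbind.int %x, %int0
        : !torch.vtensor<[2,3,4],f32>, !torch.int -> !torch.list<vtensor<[3,4],f32>>

The list-of-tensor result type is exactly what most backends cannot lower directly, which is why the recompose patterns below eliminate it before conversion.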
def Torch_AtenAddStrOp : Torch_Op<"aten.add.str", [
    AllowsTypeRefinement,
    HasValueSemantics,


@@ -121,6 +121,66 @@ public:
    return success();
  }
};
class RecomposeUnbindListUnpack : public OpRewritePattern<PrimListUnpackOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(PrimListUnpackOp op,
                                PatternRewriter &rewriter) const override {
    // Recompose AtenUnbindIntOp + PrimListUnpackOp into one AtenSelectIntOp
    // per unpacked element.
    auto unbind = op.getOperand().getDefiningOp<AtenUnbindIntOp>();
    if (!unbind)
      return failure();
    if (isListPotentiallyMutated(unbind.getResult()))
      return failure();
    Value dim = unbind.getDim();
    Value input = unbind.getSelf();
    SmallVector<Value> slices;
    for (size_t i = 0; i < op.getNumResults(); i++) {
      // Rewrite each unpacked element to an aten.select.int on the unbind
      // input along the unbind dimension.
      auto resultTy = op.getResult(i).getType();
      auto index = rewriter.create<Torch::ConstantIntOp>(
          op->getLoc(), rewriter.getI64IntegerAttr(i));
      auto newSelect = rewriter.create<AtenSelectIntOp>(op->getLoc(), resultTy,
                                                        input, dim, index);
      slices.push_back(newSelect);
    }
    rewriter.replaceOp(op, slices);
    if (unbind.getResult().use_empty())
      rewriter.eraseOp(unbind);
    return success();
  }
};
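In effect the pattern replaces each unpacked element with an aten.select.int on the unbind input, so the list-typed value disappears entirely. A minimal before/after sketch under the same 2x3x4 assumption (names illustrative):

    // Before:
    %list = torch.aten.unbind.int %x, %int0
        : !torch.vtensor<[2,3,4],f32>, !torch.int -> !torch.list<vtensor<[3,4],f32>>
    %a, %b = torch.prim.ListUnpack %list
        : !torch.list<vtensor<[3,4],f32>> -> !torch.vtensor<[3,4],f32>, !torch.vtensor<[3,4],f32>

    // After:
    %idx0 = torch.constant.int 0
    %idx1 = torch.constant.int 1
    %a = torch.aten.select.int %x, %int0, %idx0
        : !torch.vtensor<[2,3,4],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4],f32>
    %b = torch.aten.select.int %x, %int0, %idx1
        : !torch.vtensor<[2,3,4],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4],f32>

The unbind op itself is only erased once its list result has no remaining uses, so any other reader of the list is left intact.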
class RecomposeUnbindGetItem : public OpRewritePattern<Aten__Getitem__TOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(Aten__Getitem__TOp op,
                                PatternRewriter &rewriter) const override {
    // Recompose AtenUnbindIntOp + Aten__Getitem__TOp into AtenSelectIntOp.
    auto unbind = op.getList().getDefiningOp<AtenUnbindIntOp>();
    if (!unbind)
      return failure();
    if (isListPotentiallyMutated(unbind.getResult()))
      return failure();
    int64_t index;
    if (!matchPattern(op.getIdx(), m_TorchConstantInt(&index)))
      return rewriter.notifyMatchFailure(
          op, "Expected `idx` of `Aten__Getitem__TOp` to be a constant int");

    Location loc = op.getLoc();
    Value dim = unbind.getDim();
    Value input = unbind.getSelf();
    // Rewrite the indexed element to an aten.select.int on the unbind input.
    auto resultTy = op.getResult().getType();
    Value newSelect = rewriter.create<AtenSelectIntOp>(loc, resultTy, input,
                                                       dim, op.getIdx());
    rewriter.replaceOp(op, newSelect);
    if (unbind.getResult().use_empty())
      rewriter.eraseOp(unbind);
    return success();
  }
};
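The __getitem__.t case is the single-element analogue; a sketch under the same assumptions (names illustrative):

    // Before:
    %list = torch.aten.unbind.int %x, %int0
        : !torch.vtensor<[2,3,4],f32>, !torch.int -> !torch.list<vtensor<[3,4],f32>>
    %y = torch.aten.__getitem__.t %list, %int1
        : !torch.list<vtensor<[3,4],f32>>, !torch.int -> !torch.vtensor<[3,4],f32>

    // After:
    %y = torch.aten.select.int %x, %int0, %int1
        : !torch.vtensor<[2,3,4],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4],f32>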
} // namespace
namespace {
@@ -134,6 +194,8 @@ public:
    // pattern.add calls go here
    patterns.add<RecomposeSliceCopy_>(context);
    patterns.add<RecomposeSelectFill_>(context);
    patterns.add<RecomposeUnbindListUnpack>(context);
    patterns.add<RecomposeUnbindGetItem>(context);

    GreedyRewriteConfig config;
    config.useTopDownTraversal = true;


@@ -589,6 +589,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
    emit("aten::any.bool : (bool[]) -> (bool)")
    emit("aten::sort.int : (int[], bool) -> ()", has_canonicalizer=True)
    emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)")
    emit("aten::unbind.int : (Tensor, int) -> (Tensor[])")

    # Str ops.
    emit("aten::add.str : (str, str) -> (str)")


@@ -542,3 +542,42 @@ class SliceCopyNegative_Module(torch.nn.Module):
@register_test_case(module_factory=lambda: SliceCopyNegative_Module())
def SliceCopyNegative_Module_basic(module, tu: TestUtils):
    module.forward(tu.rand(10, 4, 4), tu.rand(4, 4, 4))


# ==============================================================================


class UnbindIntListUnpack_Module(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([2, 3, 4], torch.float32, True),
    ])
    def forward(self, x):
        unbind_0, unbind_1 = torch.unbind(x, 0)
        return torch.ops.aten.sub(unbind_0, unbind_1)


@register_test_case(module_factory=lambda: UnbindIntListUnpack_Module())
def UnbindIntListUnpack_Module_basic(module, tu: TestUtils):
    module.forward(tu.rand(2, 3, 4))
# ==============================================================================
class UnbindIntGetItem_Module(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([2, 3, 4], torch.float32, True),
    ])
    def forward(self, x):
        unbind = torch.unbind(x, 0)
        return torch.ops.aten.sub(unbind[0], unbind[1])


@register_test_case(module_factory=lambda: UnbindIntGetItem_Module())
def UnbindIntGetItem_Module_basic(module, tu: TestUtils):
    module.forward(tu.rand(2, 3, 4))
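Both modules feed a statically shaped 2x3x4 tensor, so the list produced by torch.unbind has a known length and each recompose pattern fires; the subtraction then checks the numerical result of the generated selects. They should be runnable through the usual e2e harness filter (for example, something like `python -m e2e_testing.main -f UnbindInt`; the exact invocation depends on the checkout).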