[MLIR][TORCH] Add e2e support for `aten.amax` op

-- This commit adds e2e support for the `aten.amax` op.

Signed-off-by: Abhishek Varma <abhishek@nod-labs.com>
Abhishek Varma 2022-11-22 18:37:28 +00:00 committed by Vivek Khandelwal
parent 2c643adcb9
commit c27c1791f1
8 changed files with 162 additions and 2 deletions
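
For context: `aten.amax` reduces a tensor to its maximum values over one or more dimensions and, unlike `aten.max.dim`, returns only the values and no indices. A short eager-mode PyTorch illustration (not part of the patch):

import torch

x = torch.rand(3, 4, 5)

# amax reduces over a set of dimensions and returns values only.
print(x.amax(dim=(0, 2)).shape)     # torch.Size([4])

# max.dim reduces a single dimension and also returns indices.
values, indices = torch.max(x, dim=1)
print(values.shape, indices.shape)  # torch.Size([3, 5]) torch.Size([3, 5])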


@@ -524,6 +524,7 @@ TOSA_PASS_SET = {
     "SquareModule_basic",
     "MaxPool2dStaticModule_basic",
     "ResNet18StaticModule_basic",
+    "ReduceAmaxKeepDim_basic",
     "NativeLayerNormModule4D_basic",
     "LayerNormNormalizeOverAllDimsModule_basic",
     "PermuteModule_basic",


@@ -6756,6 +6756,31 @@ def Torch_AtenMaxDimOp : Torch_Op<"aten.max.dim", [
   }];
 }
 
+def Torch_AtenAmaxOp : Torch_Op<"aten.amax", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::amax : (Tensor, int[], bool) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    AnyTorchListOfTorchIntType:$dim,
+    Torch_BoolType:$keepdim
+  );
+  let results = (outs
+    AnyTorchTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenAmaxOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 3, 1);
+    }
+    void AtenAmaxOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 3, 1);
+    }
+  }];
+}
+
 def Torch_AtenToDtypeOp : Torch_Op<"aten.to.dtype", [
   AllowsTypeRefinement,
   ReadOnly


@@ -167,6 +167,55 @@ static Value createSoftmaxBackwardCommonKernel(PatternRewriter &rewriter,
   return sub;
 }
 
+namespace {
+/// We decompose aten.amax into a set of aten.max.dim op(s) depending on the
+/// number of dimensions across which the max needs to be computed.
+/// Eg:
+///   INPUT:
+///      final_output = aten.amax(initial_input, dim=(0, 2, 1), keepdim=False)
+///
+///   OUTPUT:
+///      input_1 = aten.max.dim(initial_input, 2, keepdim)  #1
+///      input_2 = aten.max.dim(input_1, 1, keepdim)        #2
+///      final_output = aten.max.dim(input_2, 0, keepdim)   #3
+///
+/// NOTE: We iterate over, in reverse order, every dimension included in `dim`
+///       of the `aten.amax` op and create an `aten.max.dim` op.
+///       The input tensor to the next `aten.max.dim` op is thus the output of
+///       the previous `aten.max.dim` op.
+class DecomposeAtenAmaxOp : public OpRewritePattern<AtenAmaxOp> {
+public:
+  using OpRewritePattern::OpRewritePattern;
+  LogicalResult matchAndRewrite(AtenAmaxOp op,
+                                PatternRewriter &rewriter) const override {
+    Location loc = op.getLoc();
+    SmallVector<int64_t, 4> dims;
+    if (!matchPattern(op.dim(), m_TorchListOfConstantInts(dims)))
+      return rewriter.notifyMatchFailure(
+          op, "non-const dim parameter unsupported");
+    bool keepDim;
+    if (!matchPattern(op.keepdim(), m_TorchConstantBool(&keepDim)))
+      return rewriter.notifyMatchFailure(
+          op, "expected a constant boolean value for keepdim");
+    Value input = op.self();
+    std::sort(dims.begin(), dims.end());
+    // For every dimension included in `dim` of the op, iterated over in
+    // reverse order, we create a call to aten.max.dim.
+    for (int64_t i = dims.size() - 1; i >= 0; i--) {
+      Value dim = rewriter.create<Torch::ConstantIntOp>(
+          loc, rewriter.getI64IntegerAttr(dims[i]));
+      // The input to the next invocation of aten.max.dim is the output of the
+      // previous aten.max.dim op.
+      input = createMaxAlongDimension(rewriter, loc, op, input, dim, keepDim);
+    }
+    rewriter.replaceOp(op, input);
+    return success();
+  }
+};
+} // end namespace
+
 namespace {
 class DecomposeAtenSizeOp : public OpRewritePattern<AtenSizeOp> {
 public:
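
The pattern above is, in effect, the following Python sketch (illustration only, not part of the patch): reducing with `torch.max` along each requested dimension in descending order reproduces `torch.amax` for the constant, non-negative `dim` lists the pattern accepts.

import torch

def amax_via_max_dim(x, dims, keepdim=False):
    # Mirror of DecomposeAtenAmaxOp: sort `dims`, then reduce from the
    # innermost requested dimension outwards so that earlier reductions
    # do not shift the positions of dimensions still to be reduced.
    for d in sorted(dims, reverse=True):
        x, _ = torch.max(x, dim=d, keepdim=keepdim)
    return x

x = torch.rand(3, 4, 5)
assert torch.equal(amax_via_max_dim(x, (0, 2)), torch.amax(x, dim=(0, 2)))
assert torch.equal(amax_via_max_dim(x, (0, 2), keepdim=True),
                   torch.amax(x, dim=(0, 2), keepdim=True))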
@@ -3364,6 +3413,8 @@ public:
     target.addIllegalOp<AtenSelectScatterOp>();
     patterns.add<DecomposeAtenVarDimOp>(context);
     target.addIllegalOp<AtenVarDimOp>();
+    patterns.add<DecomposeAtenAmaxOp>(context);
+    target.addIllegalOp<AtenAmaxOp>();
     patterns.add<DecomposeAtenVarCorrectionOp>(context);
     target.addIllegalOp<AtenVarCorrectionOp>();
     patterns.add<DecomposeAtenStdDimOp>(context);

View File

@@ -1006,9 +1006,9 @@ void TypeAnalysis::visitOperation(Operation *op,
         getDtypeOrDefault(mean.getContext(), mean.dtype(), defaultDtype);
     visitReductionAlongAllDimsOp(mean, dtype, operands);
     return;
-  } else if (auto max = dyn_cast<AtenMaxOp>(op)) {
+  } else if (isa<AtenMaxOp, AtenAmaxOp>(op)) {
     Type dtype = operands[0]->getValue().dtype;
-    visitReductionAlongAllDimsOp(max, dtype, operands);
+    visitReductionAlongAllDimsOp(op, dtype, operands);
     return;
   } else if (isa<AtenStdOp, AtenStdDimOp, AtenVarOp, AtenVarDimOp,
              AtenVarCorrectionOp>(op)) {
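
Routing `AtenAmaxOp` through `visitReductionAlongAllDimsOp` propagates the input dtype unchanged, which matches eager PyTorch: `amax` performs no type promotion. A quick sanity check (illustration only, not part of the patch):

import torch

# amax keeps the input dtype, matching the dtype propagation in
# visitReductionAlongAllDimsOp above.
for dtype in (torch.float32, torch.float64, torch.int64):
    x = torch.ones(2, 3, dtype=dtype)
    assert x.amax(dim=(0, 1)).dtype == dtype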


@@ -5842,6 +5842,13 @@ StringRef mlir::torch::Torch::getShapeLibrary() {
 "    %1 = torch.prim.TupleConstruct %0, %0 : !torch.list<int>, !torch.list<int> -> !torch.tuple<list<int>, list<int>>\n"
 "    return %1 : !torch.tuple<list<int>, list<int>>\n"
 "  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.amax\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.bool) -> !torch.list<int> {\n"
+"    %none = torch.constant.none\n"
+"    %0 = torch.derefine %arg1 : !torch.list<int> to !torch.optional<list<int>>\n"
+"    %1 = torch.derefine %none : !torch.none to !torch.any\n"
+"    %2 = call @__torch__.torch.jit._shape_functions.sum_mean_dim(%arg0, %0, %arg2, %1) : (!torch.list<int>, !torch.optional<list<int>>, !torch.bool, !torch.any) -> !torch.list<int>\n"
+"    return %2 : !torch.list<int>\n"
+"  }\n"
 "  func.func @\"__torch_mlir_shape_fn.aten.mean.dim\"(%arg0: !torch.list<int>, %arg1: !torch.optional<list<int>>, %arg2: !torch.bool, %arg3: !torch.optional<int>) -> !torch.list<int> {\n"
 "    %0 = torch.derefine %arg3 : !torch.optional<int> to !torch.any\n"
 "    %1 = call @__torch__.torch.jit._shape_functions.sum_mean_dim(%arg0, %arg1, %arg2, %0) : (!torch.list<int>, !torch.optional<list<int>>, !torch.bool, !torch.any) -> !torch.list<int>\n"


@@ -589,6 +589,9 @@ def aten〇max〇dim(self: List[int], dim: int, keepdim: bool = False) -> Tuple[
     reduced_shape = _reduce_along_dim(self, dim, keepdim)
     return reduced_shape, reduced_shape
 
+def aten〇amax(self: List[int], dim: List[int] = (), keepdim: bool = False) -> List[int]:
+    return upstream_shape_functions.sum_mean_dim(self, dim, keepdim, None)
+
 def aten〇mean〇dim(self: List[int], dim: Optional[List[int]], keepdim: bool = False, dtype: Optional[int] = None) -> List[int]:
     return upstream_shape_functions.sum_mean_dim(self, dim, keepdim, dtype)
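
Because the shape function delegates to the upstream `sum_mean_dim` helper, `aten.amax` follows the same shape rule as `aten.mean.dim`: reduced dimensions are dropped, or kept with extent 1 when `keepdim` is set. In eager PyTorch terms (illustration only, not part of the patch):

import torch

x = torch.rand(3, 4, 5)
# Reduced dimensions are dropped when keepdim=False...
assert x.amax(dim=(0, 2)).shape == (4,)
# ...and kept with extent 1 when keepdim=True, the same rule
# sum_mean_dim implements for mean.dim.
assert x.amax(dim=(0, 2), keepdim=True).shape == (1, 4, 1)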


@@ -478,6 +478,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::sum.dim_IntList : (Tensor, int[]?, bool, int?) -> (Tensor)")
     emit("aten::max : (Tensor) -> (Tensor)")
     emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)")
+    emit("aten::amax : (Tensor, int[], bool) -> (Tensor)")
     emit("aten::to.dtype : (Tensor, int, bool, bool, int?) -> (Tensor)", has_folder=True)
     emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True)
     emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)")


@@ -462,6 +462,78 @@ def ReduceMaxUnsignedIntModule_basic(module, tu: TestUtils):
 
 # ==============================================================================
 
+class ReduceAmaxSingleDim(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1, -1], torch.float32, True),
+    ])
+    def forward(self, a):
+        return torch.ops.aten.amax(a, 1)
+
+@register_test_case(module_factory=lambda: ReduceAmaxSingleDim())
+def ReduceAmaxSingleDim_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 4, 5, high=100))
+
+# ==============================================================================
+
+class ReduceAmaxMultiDim(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1, -1], torch.float32, True),
+    ])
+    def forward(self, a):
+        return torch.ops.aten.amax(a, (0, 2))
+
+@register_test_case(module_factory=lambda: ReduceAmaxMultiDim())
+def ReduceAmaxMultiDim_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 4, 5, high=100))
+
+# ==============================================================================
+
+class ReduceAmaxOutOfOrderDim(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1, -1, -1], torch.float32, True),
+    ])
+    def forward(self, a):
+        return torch.ops.aten.amax(a, (2, 1, 3))
+
+@register_test_case(module_factory=lambda: ReduceAmaxOutOfOrderDim())
+def ReduceAmaxOutOfOrderDim_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 4, 5, 6, high=100))
+
+# ==============================================================================
+
+class ReduceAmaxKeepDim(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1, -1], torch.float32, True),
+    ])
+    def forward(self, a):
+        return torch.ops.aten.amax(a, (0, 2), keepdim=True)
+
+@register_test_case(module_factory=lambda: ReduceAmaxKeepDim())
+def ReduceAmaxKeepDim_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 4, 5, high=100))
+
+# ==============================================================================
+
 class ReduceL1NormModule(torch.nn.Module):
     def __init__(self):
         super().__init__()