mirror of https://github.com/llvm/torch-mlir

[Torch Dialect] add support for AtenIsnanOp (#2170)

* add support for mhlo
* Add Test for torch.ne
* fix torch.ne shape/add static test case
* add support for static torch.ne

Co-authored-by: root <root@n31-177-039.byted.org>

branch pull/2194/head
parent 2480cb7a51
commit e4f8fb1b8c
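For reference, the eager-mode PyTorch behavior the new e2e tests exercise (a standalone sketch, not part of the patch): aten.isnan yields a boolean NaN mask, and aten.ne.Tensor compares elementwise with broadcasting.

```python
import torch

x = torch.tensor([[1.0, float("nan"), 3.0]])
print(torch.ops.aten.isnan(x))   # tensor([[False,  True, False]])

a = torch.tensor([[1, 2, 3], [4, 5, 6]])
b = torch.tensor([1, 5, 6])      # broadcasts across the leading dimension
print(torch.ne(a, b))            # tensor([[False,  True,  True],
                                 #         [ True, False, False]])
```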
@@ -378,6 +378,7 @@ STABLEHLO_PASS_SET = {
     "CumsumStaticModule_basic",
     "CumsumStaticNegativeDimModule_basic",
     "DetachModule_basic",
+    "ElementwiseIsnanModule_basic",
     "ElementwiseAtenLogicalAndOpPromoteBroadcastStaticShapeModule_basic",
     "ElementwiseAtenLogicalNotOpModule_basic",
     "ElementwiseAtenLogicalNotOpPromoteModule_basic",
@@ -423,6 +424,9 @@ STABLEHLO_PASS_SET = {
     "ElementwiseEqDiffWidthScalarModule_basic",
     "ElementwiseEqFloatScalarModule_basic",
     "ElementwiseEqIntScalarModule_basic",
+    "ElementwiseNeFloatScalarModule_basic",
+    "ElementwiseNeFloatTensorStaticModule_basic",
+    "ElementwiseNeIntTensorStaticModule_basic",
     "ElementwiseErfModule_basic",
     "ElementwiseGeluModule_basic",
     "ElementwiseGtFloatScalarModule_basic",
@@ -443,7 +447,6 @@ STABLEHLO_PASS_SET = {
     "ElementwiseMulScalarModule_basic",
     "ElementwiseMulScalarModule_float",
     "ElementwiseMulScalarModule_int",
-    "ElementwiseNeFloatTensorModule_basic",
     "ElementwiseNeIntScalarModule_basic",
     "ElementwiseReciprocalModule_basic",
     "ElementwiseRelu6Module_basic",
@@ -875,6 +878,11 @@ TOSA_PASS_SET = {
     "ElementwiseEqDiffWidthScalarModule_basic",
     "ElementwiseEqFloatTensorModule_basic",
     "ElementwiseEqIntTensorModule_basic",
+    "ElementwiseNeFloatScalarModule_basic",
+    "ElementwiseNeFloatTensorModule_basic",
+    "ElementwiseNeFloatTensorStaticModule_basic",
+    "ElementwiseNeIntTensorModule_basic",
+    "ElementwiseNeIntTensorStaticModule_basic",
     "ElementwiseMulScalarModule_int",
     "ElementwiseMulScalarModule_float",
     "ElementwiseMulTensorIntModule_basic",
@@ -885,6 +893,7 @@ TOSA_PASS_SET = {
     "ElementwiseMulScalarModule_float",
     "ElementwiseCeilModule_basic",
     "ElementwiseReciprocalModule_basic",
+    "ElementwiseIsnanModule_basic",
     "TypePromotionAlphaWiderModule_basic",
     "Conv2dWithPaddingDilationStrideStaticModule_basic",
     "BatchNorm1DModule_basic",
@@ -955,7 +964,6 @@ TOSA_PASS_SET = {
     "ElementwiseGeluModule_basic",
     "GeluBackwardModule_basic",
     "ElementwiseNeIntScalarModule_basic",
-    "ElementwiseNeFloatTensorModule_basic",
     "Convolution2DStaticModule_basic",
     "ElementwiseNegModule_basic",
     "TestMultipleTensorReturn_basic",
@@ -6524,6 +6524,29 @@ def Torch_Aten_ShapeAsTensorOp : Torch_Op<"aten._shape_as_tensor", [
   }];
 }
 
+def Torch_AtenIsnanOp : Torch_Op<"aten.isnan", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::isnan : (Tensor) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self
+  );
+  let results = (outs
+    AnyTorchTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenIsnanOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 1, 1);
+    }
+    void AtenIsnanOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 1, 1);
+    }
+  }];
+}
+
 def Torch_AtenAllOp : Torch_Op<"aten.all", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -142,7 +142,8 @@ static Value createCompareTensorOp(OpBuilder &b, Location loc, OpTy op,
                     std::is_same<OpTy, AtenLeTensorOp>() ||
                     std::is_same<OpTy, AtenGtTensorOp>() ||
                     std::is_same<OpTy, AtenGeTensorOp>() ||
-                    std::is_same<OpTy, AtenEqTensorOp>(),
+                    std::is_same<OpTy, AtenEqTensorOp>() ||
+                    std::is_same<OpTy, AtenNeTensorOp>(),
                 "unimplemented: op type not supported");

   Type lhsDtype = lhs.getType();
@@ -172,6 +173,9 @@ static Value createCompareTensorOp(OpBuilder &b, Location loc, OpTy op,
   if constexpr (std::is_same<OpTy, AtenEqTensorOp>()) {
     return createEqual(b, loc, elementalType, lhs, rhs);
   }
+  if constexpr (std::is_same<OpTy, AtenNeTensorOp>()) {
+    return createNotEqual(b, loc, elementalType, lhs, rhs);
+  }
   llvm_unreachable("unimplemented: op type not supported");
 }

@@ -595,6 +599,10 @@ static Value createLinalgPayloadCalculationForElementwiseOp(
     return createCompareTensorOp(b, loc, eqTensor, payloadArgs[0],
                                  payloadArgs[1]);
   }
+  if (auto neTensor = dyn_cast<AtenNeTensorOp>(op)) {
+    return createCompareTensorOp(b, loc, neTensor, payloadArgs[0],
+                                 payloadArgs[1]);
+  }
   if (auto div = dyn_cast<AtenDivTensorOp>(op)) {
     AtenDivTensorOp::Adaptor adaptor(operands);
     Type dtype = converter->convertType(div.getType())
@@ -1156,7 +1164,7 @@ public:
             AtenReciprocalOp, AtenBitwiseAndTensorOp, AtenBitwiseOrTensorOp,
             AtenBitwiseXorTensorOp, AtenGtScalarOp, AtenGeScalarOp,
             AtenEqScalarOp, AtenLtScalarOp, AtenLeScalarOp, AtenWhereSelfOp,
-            AtenCeilOp, AtenGtTensorOp, AtenGeTensorOp, AtenEqTensorOp,
+            AtenCeilOp, AtenGtTensorOp, AtenGeTensorOp, AtenEqTensorOp, AtenNeTensorOp,
             AtenLtTensorOp, AtenLeTensorOp, AtenSubScalarOp, AtenAddScalarOp,
             AtenThresholdOp, AtenThresholdBackwardOp, AtenHardtanhBackwardOp,
             AtenCloneOp, AtenSinOp, AtenCosOp, AtenNeScalarOp, AtenNegOp,
@@ -1689,7 +1697,7 @@ void mlir::torch::torch_to_linalg::populateUncategorizedPatternsAndLegality(
       AtenRsqrtOp, AtenAbsOp, AtenReciprocalOp, AtenBitwiseAndTensorOp,
       AtenBitwiseOrTensorOp, AtenBitwiseXorTensorOp, AtenGtScalarOp,
       AtenGeScalarOp, AtenEqScalarOp, AtenLtScalarOp, AtenLeScalarOp,
-      AtenWhereSelfOp, AtenGtTensorOp, AtenGeTensorOp, AtenEqTensorOp,
+      AtenWhereSelfOp, AtenGtTensorOp, AtenGeTensorOp, AtenEqTensorOp, AtenNeTensorOp,
       AtenLtTensorOp, AtenLeTensorOp, AtenThresholdOp, AtenThresholdBackwardOp,
       AtenHardtanhBackwardOp, AtenCloneOp, AtenSinOp, AtenCosOp, AtenNeScalarOp,
       AtenMaskedFillTensorOp, AtenLogicalOrOp, AtenLogicalAndOp, AtenAtanOp,
@@ -6341,6 +6341,14 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
 "    return %0 : !torch.list<int>\n"
 "  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.isnan\"(%arg0: !torch.list<int>) -> !torch.list<int> {\n"
+"    %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
+"    return %0 : !torch.list<int>\n"
+"  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.ne.Tensor\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>) -> !torch.list<int> {\n"
+"    %0 = call @__torch__.torch.jit._shape_functions.broadcast(%arg0, %arg1) : (!torch.list<int>, !torch.list<int>) -> !torch.list<int>\n"
+"    return %0 : !torch.list<int>\n"
+"  }\n"
 "  func.func @\"__torch_mlir_shape_fn.aten.eq.Scalar\"(%arg0: !torch.list<int>, %arg1: !torch.float) -> !torch.list<int> {\n"
 "    %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
 "    return %0 : !torch.list<int>\n"
@@ -8627,6 +8635,14 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %int11 = torch.constant.int 11\n"
 "    return %int11 : !torch.int\n"
 "  }\n"
+"  func.func @\"__torch_mlir_dtype_fn.aten.isnan\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
+"    %int11 = torch.constant.int 11\n"
+"    return %int11 : !torch.int\n"
+"  }\n"
+"  func.func @\"__torch_mlir_dtype_fn.aten.ne.Tensor\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>) -> !torch.int {\n"
+"    %int11 = torch.constant.int 11\n"
+"    return %int11 : !torch.int\n"
+"  }\n"
 "  func.func @\"__torch_mlir_dtype_fn.aten.ne.Scalar\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.union<float, int>) -> !torch.int {\n"
 "    %int11 = torch.constant.int 11\n"
 "    return %int11 : !torch.int\n"
@@ -351,6 +351,20 @@ public:
 };
 } // namespace
 
+namespace {
+class DecomposeAtenIsnanOp : public OpRewritePattern<AtenIsnanOp> {
+public:
+  using OpRewritePattern::OpRewritePattern;
+  LogicalResult matchAndRewrite(AtenIsnanOp op,
+                                PatternRewriter &rewriter) const override {
+    Value input = op.getSelf();
+    // Create a new aten.ne operation with the same type and input value.
+    rewriter.replaceOpWithNewOp<AtenNeTensorOp>(op, op.getType(), input, input);
+    return success();
+  }
+};
+} // namespace
+
 namespace {
 class DecomposeAtenReshapeOp : public OpRewritePattern<AtenReshapeOp> {
 public:
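The decomposition above rewrites aten.isnan into aten.ne.Tensor of the input with itself: under IEEE 754, NaN is the only value that compares unequal to itself. A minimal eager-mode check of that identity (plain PyTorch, not part of the patch):

```python
import torch

x = torch.tensor([1.0, float("nan"), float("inf"), -0.0])
# x != x is True exactly at the NaN positions, so ne(x, x) reproduces isnan(x).
assert torch.equal(torch.isnan(x), torch.ne(x, x))
```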
@@ -4572,6 +4586,7 @@ public:
         DecomposeAtenBernoulliLikeOp<AtenBernoulliPOp>>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenBernoulliTensorOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenZeroOp>(patterns);
+    addPatternIfTargetOpIsIllegal<DecomposeAtenIsnanOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenRandLikeOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenHardsigmoidOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenRelu6Op>(patterns);
@@ -422,6 +422,7 @@ static void markDecomposedOpsAsIllegal(MLIRContext *context,
   target.addIllegalOp<AtenBernoulliPOp>();
   target.addIllegalOp<AtenBernoulliTensorOp>();
   target.addIllegalOp<AtenZeroOp>();
+  target.addIllegalOp<AtenIsnanOp>();
   target.addIllegalOp<AtenRandLikeOp>();
   target.addIllegalOp<AtenHardsigmoidOp>();
   target.addIllegalOp<AtenRelu6Op>();
@@ -218,6 +218,12 @@ def aten〇lift_fresh_copy〡shape(self: List[int]) -> List[int]:
 def aten〇_log_softmax_backward_data〡shape(grad_output: List[int], output: List[int], dim: int, input_dtype: int) -> List[int]:
     return upstream_shape_functions.unary(grad_output)
 
+def aten〇isnan〡shape(self: List[int]) -> List[int]:
+    return upstream_shape_functions.unary(self)
+
+def aten〇ne〇Tensor〡shape(self: List[int], other: List[int]) -> List[int]:
+    return upstream_shape_functions.broadcast(self, other)
+
 def aten〇eq〇Scalar〡shape(self: List[int], other: float) -> List[int]:
     return upstream_shape_functions.unary(self)
 
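The shape functions above delegate to upstream helpers: `unary` returns the input shape unchanged, while `broadcast` combines the two shapes right-aligned. A minimal sketch of that broadcasting rule (the helper name below is illustrative, not the upstream implementation):

```python
from typing import List

def broadcast_shapes_sketch(a: List[int], b: List[int]) -> List[int]:
    # Right-align the shapes; equal sizes pass through, a size of 1 stretches
    # to the other size, and anything else is an error.
    out: List[int] = []
    for i in range(max(len(a), len(b))):
        da = a[-1 - i] if i < len(a) else 1
        db = b[-1 - i] if i < len(b) else 1
        if da != db and da != 1 and db != 1:
            raise ValueError(f"shapes {a} and {b} are not broadcastable")
        out.append(max(da, db))
    return list(reversed(out))

# Matches the new static ne test case: an [8, 5] tensor against a [5] tensor.
assert broadcast_shapes_sketch([8, 5], [5]) == [8, 5]
```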
@@ -1976,6 +1982,14 @@ def aten〇lt〇Tensor〡dtype(self_rank_dtype: Tuple[int, int], other_rank_dtype: Tuple[int, int]) -> int:
 def aten〇le〇Tensor〡dtype(self_rank_dtype: Tuple[int, int], other_rank_dtype: Tuple[int, int]) -> int:
     return torch.bool
 
+@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1))
+def aten〇isnan〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
+    return torch.bool
+
+@check_dtype_function(_check_two_tensor_op())
+def aten〇ne〇Tensor〡dtype(self_rank_dtype: Tuple[int, int], other_rank_dtype: Tuple[int, int]) -> int:
+    return torch.bool
+
 @check_dtype_function(
     _check_tensors_with_the_same_dtype(num_of_tensors=1, other=0.0) +
     _check_tensors_with_the_same_dtype(num_of_tensors=1, other=0))
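Both dtype functions report torch.bool regardless of the input dtypes; in the generated abstract interp library above this appears as `torch.constant.int 11`, PyTorch's ScalarType code for bool. A quick eager-mode sanity check (not part of the patch):

```python
import torch

x = torch.tensor([1.0, float("nan")], dtype=torch.float32)
assert torch.isnan(x).dtype == torch.bool
assert torch.ne(x, torch.zeros_like(x)).dtype == torch.bool
```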
@@ -466,6 +466,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)")
     emit("aten::scalar_tensor : (Scalar, int?, int?, Device?, bool?) -> (Tensor)")
     emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)")
+    emit("aten::isnan : (Tensor) -> (Tensor)")
     emit("aten::all : (Tensor) -> (Tensor)")
     emit("aten::all.bool : (bool[]) -> (bool)")
     emit("aten::any : (Tensor) -> (Tensor)")
@@ -553,7 +553,7 @@ class ElementwiseNeFloatScalarModule(torch.nn.Module):
 
 
 @register_test_case(module_factory=lambda: ElementwiseNeFloatScalarModule())
-def ElementwiseNeFloatTensorModule_basic(module, tu: TestUtils):
+def ElementwiseNeFloatScalarModule_basic(module, tu: TestUtils):
     module.forward(
         torch.tensor([[1.0, 2.2, 2.0], [6.0, 2.0, 3.1]]).to(torch.float32))
 
@@ -578,6 +578,90 @@ def ElementwiseNeIntScalarModule_basic(module, tu: TestUtils):
 
 # ==============================================================================
 
+class ElementwiseNeFloatTensorModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1], torch.float32, True),
+        ([-1, -1], torch.float32, True),
+    ])
+    def forward(self, x, y):
+        return torch.ne(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseNeFloatTensorModule())
+def ElementwiseNeFloatTensorModule_basic(module, tu: TestUtils):
+    module.forward(
+        torch.tensor([[1.0, 2.2, 6.0], [6.0, 2.0, 3.1]]).to(torch.float32),
+        torch.tensor([[1.0, 2.4, 6.0], [torch.nan, 2.0, 6.0]]).to(torch.float32))
+
+# ==============================================================================
+
+class ElementwiseNeIntTensorModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1], torch.int64, True),
+        ([-1], torch.int64, True),
+    ])
+    def forward(self, x, y):
+        return torch.ne(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseNeIntTensorModule())
+def ElementwiseNeIntTensorModule_basic(module, tu: TestUtils):
+    module.forward(tu.randint(8, 5, low=2, high=4), tu.randint(5, low=2, high=4))
+
+# ==============================================================================
+
+class ElementwiseNeFloatTensorStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([2, 3], torch.float32, True),
+        ([2, 3], torch.float32, True),
+    ])
+    def forward(self, x, y):
+        return torch.ne(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseNeFloatTensorStaticModule())
+def ElementwiseNeFloatTensorStaticModule_basic(module, tu: TestUtils):
+    module.forward(
+        torch.tensor([[1.0, 2.2, 6.0], [6.0, 2.0, 3.1]]).to(torch.float32),
+        torch.tensor([[1.0, 2.4, 6.0], [torch.nan, 2.0, 6.0]]).to(torch.float32))
+
+# ==============================================================================
+
+class ElementwiseNeIntTensorStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([8, 5], torch.int64, True),
+        ([5], torch.int64, True),
+    ])
+    def forward(self, x, y):
+        return torch.ne(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseNeIntTensorStaticModule())
+def ElementwiseNeIntTensorStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.randint(8, 5, low=2, high=4), tu.randint(5, low=2, high=4))
+
+# ==============================================================================
+
 class AnyBoolTrueModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -651,3 +735,23 @@ class AllBoolFalseModule(torch.nn.Module):
 @register_test_case(module_factory=lambda: AllBoolFalseModule())
 def AllBoolFalseModule_basic(module, tu: TestUtils):
     module.forward()
+
+# ==============================================================================
+
+class ElementwiseIsnanModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1, -1], torch.float32, True),
+    ])
+    def forward(self, x):
+        return torch.ops.aten.isnan(x)
+
+
+@register_test_case(module_factory=lambda: ElementwiseIsnanModule())
+def ElementwiseIsnanModule_basic(module, tu: TestUtils):
+    x = torch.full((1, 1, 32), torch.nan)
+    module.forward(x)