[MLIR][TORCH] Add E2E support for aten.Bool.[float|int] op

This commit adds lowering of the `aten.Bool.float` and `aten.Bool.int` ops.

Signed-off-by: Vivek Khandelwal <vivek@nod-labs.com>
pull/871/head snapshot-20220524.471
Vivek Khandelwal 2022-05-20 13:56:52 +05:30
parent 014a6d16c7
commit 56e77d4213
7 changed files with 276 additions and 0 deletions

View File

@ -7189,6 +7189,54 @@ def Torch_AtenSqrtIntOp : Torch_Op<"aten.sqrt.int", [
let hasFolder = 1;
}
// ODS record for `aten::Bool.float : (float) -> (bool)` — scalar
// float-to-bool conversion. The op has value semantics, allows type
// refinement, and reads no mutable state.
// NOTE(review): this definition is produced by the Python ODS emitter
// (the `emit("aten::Bool.float ...")` line in torch_ods_gen) — prefer
// regenerating over hand-editing.
def Torch_AtenBoolFloatOp : Torch_Op<"aten.Bool.float", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::Bool.float : (float) -> (bool)`";
// Single float operand `a`, single bool result.
let arguments = (ins
Torch_FloatType:$a
);
let results = (outs
Torch_BoolType:$result
);
// Assembly format delegates to the shared default torch-op
// parser/printer with 1 operand and 1 result.
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenBoolFloatOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 1);
}
void AtenBoolFloatOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 1);
}
}];
// Constant operands fold to a bool constant (see AtenBoolFloatOp::fold).
let hasFolder = 1;
}
// ODS record for `aten::Bool.int : (int) -> (bool)` — scalar int-to-bool
// conversion. Same traits as the float variant: value semantics, type
// refinement allowed, read-only.
// NOTE(review): generated from the `emit("aten::Bool.int ...")` line in
// torch_ods_gen — regenerate rather than hand-edit.
def Torch_AtenBoolIntOp : Torch_Op<"aten.Bool.int", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::Bool.int : (int) -> (bool)`";
// Single int operand `a`, single bool result.
let arguments = (ins
Torch_IntType:$a
);
let results = (outs
Torch_BoolType:$result
);
// Default torch-op assembly (1 operand, 1 result).
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenBoolIntOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 1);
}
void AtenBoolIntOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 1);
}
}];
// Constant operands fold to a bool constant (see AtenBoolIntOp::fold).
let hasFolder = 1;
}
def Torch_AtenEqDeviceOp : Torch_Op<"aten.eq.device", [
AllowsTypeRefinement,
HasValueSemantics,

View File

@ -225,6 +225,33 @@ public:
};
} // namespace
namespace {
// Shared conversion for the scalar `aten.Bool.*` ops: the result is true
// iff the (already type-converted) scalar operand is nonzero.
//
// Template parameters:
//   OpTy      - the torch op being lowered (AtenBoolFloatOp/AtenBoolIntOp).
//   CmpOpTy   - the arith comparison op (arith::CmpFOp / arith::CmpIOp).
//   CmpOpPred - the predicate enum type, with Pred its "not equal" member.
template <typename OpTy, typename CmpOpTy, typename CmpOpPred, CmpOpPred Pred>
class ConvertAtenBoolLikeOp : public OpConversionPattern<OpTy> {
public:
using OpConversionPattern<OpTy>::OpConversionPattern;
using OpAdaptor = typename OpTy::Adaptor;
LogicalResult
matchAndRewrite(OpTy op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Type operandTy = adaptor.a().getType();
// Materialize a zero of the operand's type plus the two i1 constants the
// select consumes.
Value zero = rewriter.create<arith::ConstantOp>(
loc, rewriter.getZeroAttr(operandTy));
Value trueVal =
rewriter.create<arith::ConstantOp>(loc, rewriter.getBoolAttr(true));
Value falseVal =
rewriter.create<arith::ConstantOp>(loc, rewriter.getBoolAttr(false));
// Compare `a != 0`, then route through a select so the emitted IR matches
// the lowering tests for this pattern.
Value isNonZero =
rewriter.create<CmpOpTy>(loc, Pred, adaptor.a(), zero);
rewriter.replaceOpWithNewOp<arith::SelectOp>(op, isNonZero, trueVal,
falseVal);
return success();
}
};
} // namespace
// -----------------------------------------------------------------------------
// The pass
// -----------------------------------------------------------------------------
@ -315,6 +342,15 @@ public:
typeConverter, context);
target.addIllegalOp<AtenAnyBoolOp>();
patterns.add<ConvertAtenAnyBoolOp>(typeConverter, context);
// Scalar Bool conversions must be rewritten; marking them illegal makes the
// partial conversion fail loudly if a pattern does not fire.
target.addIllegalOp<AtenBoolFloatOp, AtenBoolIntOp>();
// aten.Bool.float: compare the f64 operand against 0.0 with the UNE
// (unordered-or-not-equal) predicate, so a NaN operand also lowers to true.
patterns.add<
ConvertAtenBoolLikeOp<AtenBoolFloatOp, arith::CmpFOp,
arith::CmpFPredicate, arith::CmpFPredicate::UNE>>(
typeConverter, context);
// aten.Bool.int: plain integer "not equal to zero" comparison.
patterns.add<
ConvertAtenBoolLikeOp<AtenBoolIntOp, arith::CmpIOp,
arith::CmpIPredicate, arith::CmpIPredicate::ne>>(
typeConverter, context);
if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns))))

View File

@ -1002,6 +1002,28 @@ OpFoldResult AtenGeIntOp::fold(ArrayRef<Attribute> operands) {
[](int64_t a, int64_t b) { return a >= b; });
}
//===----------------------------------------------------------------------===//
// AtenBoolFloatOp
//===----------------------------------------------------------------------===//

// Constant-fold `aten.Bool.float`: when the operand is a known constant
// float, the op folds to the i1 constant `value != 0.0`. Returns nullptr
// (no fold) for non-constant operands.
OpFoldResult AtenBoolFloatOp::fold(ArrayRef<Attribute> operands) {
double value;
if (!matchPattern(getOperand(), m_TorchConstantFloat(&value)))
return nullptr;
return getI1IntegerAttr(getContext(), value != 0.0);
}
//===----------------------------------------------------------------------===//
// AtenBoolIntOp
//===----------------------------------------------------------------------===//

// Constant-fold `aten.Bool.int`: when the operand is a known constant
// integer, the op folds to the i1 constant `value != 0`. Returns nullptr
// (no fold) for non-constant operands.
OpFoldResult AtenBoolIntOp::fold(ArrayRef<Attribute> operands) {
int64_t value;
if (!matchPattern(getOperand(), m_TorchConstantInt(&value)))
return nullptr;
return getI1IntegerAttr(getContext(), value != 0);
}
//===----------------------------------------------------------------------===//
// AtenFloatScalarOp
//===----------------------------------------------------------------------===//

View File

@ -523,6 +523,8 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
emit("aten::div : (Scalar, Scalar) -> (float)")
emit("aten::add : (Scalar, Scalar) -> (Scalar)")
emit("aten::sqrt.int : (int) -> (float)", has_folder=True)
# Scalar-to-bool conversions; both have folders for constant operands.
emit("aten::Bool.float : (float) -> (bool)", has_folder=True)
emit("aten::Bool.int : (int) -> (bool)", has_folder=True)
emit("aten::eq.device : (Device, Device) -> (bool)")
emit("aten::ceil.float : (float) -> (int)", has_folder=True)

View File

@ -191,3 +191,123 @@ class SqrtIntConstantModule(torch.nn.Module):
@register_test_case(module_factory=lambda: SqrtIntConstantModule())
def SqrtIntConstantModule_basic(module, tu: TestUtils):
    module.forward()
# ==============================================================================
class BoolFloatFalseModule(torch.nn.Module):
    """E2E check: aten.Bool.float applied to exactly 0.0 yields False."""

    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([], torch.float64, True),
    ])
    def forward(self, a):
        # a - a is 0.0 for any finite input, so Bool(...) must produce False.
        zero = float(a) - float(a)
        return bool(torch.ops.aten.Bool(float(zero)))


@register_test_case(module_factory=lambda: BoolFloatFalseModule())
def BoolFloatFalseModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(low=0.5).double())
class BoolFloatTrueModule(torch.nn.Module):
    """E2E check: aten.Bool.float on a strictly positive input yields True."""

    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([], torch.float64, True),
    ])
    def forward(self, a):
        # Inputs are drawn from [0.5, 1), i.e. always nonzero.
        result = torch.ops.aten.Bool(float(a))
        return bool(result)


@register_test_case(module_factory=lambda: BoolFloatTrueModule())
def BoolFloatTrueModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(low=0.5).double())
class BoolFloatConstantModule(torch.nn.Module):
    """E2E check: aten.Bool.float folds a nonzero float literal to True."""

    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
    ])
    def forward(self):
        value = torch.ops.aten.Bool(5.0)
        return bool(value)


@register_test_case(module_factory=lambda: BoolFloatConstantModule())
def BoolFloatConstantModule_basic(module, tu: TestUtils):
    module.forward()
# ==============================================================================
class BoolIntFalseModule(torch.nn.Module):
    """E2E check: aten.Bool.int applied to exactly 0 yields False."""

    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([], torch.int64, True),
    ])
    def forward(self, a):
        # a - a is always 0, so Bool(...) must produce False.
        zero = int(a) - int(a)
        return bool(torch.ops.aten.Bool(int(zero)))


@register_test_case(module_factory=lambda: BoolIntFalseModule())
def BoolIntFalseModule_basic(module, tu: TestUtils):
    module.forward(torch.randint(1, 100, ()))
class BoolIntTrueModule(torch.nn.Module):
    """E2E check: aten.Bool.int on a strictly positive input yields True."""

    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([], torch.int64, True),
    ])
    def forward(self, a):
        # Inputs are drawn from [1, 100), i.e. always nonzero.
        result = torch.ops.aten.Bool(int(a))
        return bool(result)


@register_test_case(module_factory=lambda: BoolIntTrueModule())
def BoolIntTrueModule_basic(module, tu: TestUtils):
    module.forward(torch.randint(1, 100, ()))
class BoolIntConstantModule(torch.nn.Module):
    """E2E check: aten.Bool.int folds a nonzero int literal to True."""

    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
    ])
    def forward(self):
        value = torch.ops.aten.Bool(5)
        return bool(value)


@register_test_case(module_factory=lambda: BoolIntConstantModule())
def BoolIntConstantModule_basic(module, tu: TestUtils):
    module.forward()

View File

@ -262,3 +262,33 @@ func.func @torch.aten.any.bool() -> !torch.bool {
%0 = torch.aten.any.bool %input : !torch.list<bool> -> !torch.bool
return %0 : !torch.bool
}
// Lowering test for aten.Bool.float: the operand is converted to f64 and
// compared against 0.0 with the `une` predicate (unordered-or-not-equal, so
// a NaN operand also yields true), and the i1 result is routed through an
// arith.select of the two boolean constants before conversion back to
// !torch.bool.
// CHECK-LABEL: func.func @torch.aten.Bool.float(
// CHECK-SAME: %[[ARG:.*]]: !torch.float) -> !torch.bool {
// CHECK: %[[ARG_F64:.*]] = torch_c.to_f64 %[[ARG]]
// CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f64
// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[FALSE:.*]] = arith.constant false
// CHECK: %[[CMP:.*]] = arith.cmpf une, %[[ARG_F64]], %[[CST]] : f64
// CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[TRUE]], %[[FALSE]] : i1
// CHECK: %[[OUT:.*]] = torch_c.from_i1 %[[SELECT]]
// CHECK: return %[[OUT]] : !torch.bool
func.func @torch.aten.Bool.float(%arg0: !torch.float) -> !torch.bool {
%0 = torch.aten.Bool.float %arg0 : !torch.float -> !torch.bool
return %0 : !torch.bool
}
// Lowering test for aten.Bool.int: the operand is converted to i64 and
// compared against 0 with the integer `ne` predicate, then routed through
// an arith.select of the two boolean constants before conversion back to
// !torch.bool.
// CHECK-LABEL: func.func @torch.aten.Bool.int(
// CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.bool {
// CHECK: %[[ARG_I64:.*]] = torch_c.to_i64 %[[ARG]]
// CHECK: %[[CST:.*]] = arith.constant 0 : i64
// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[FALSE:.*]] = arith.constant false
// CHECK: %[[CMP:.*]] = arith.cmpi ne, %[[ARG_I64]], %[[CST]] : i64
// CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[TRUE]], %[[FALSE]] : i1
// CHECK: %[[OUT:.*]] = torch_c.from_i1 %[[SELECT]]
// CHECK: return %[[OUT]] : !torch.bool
func.func @torch.aten.Bool.int(%arg0: !torch.int) -> !torch.bool {
%0 = torch.aten.Bool.int %arg0 : !torch.int -> !torch.bool
return %0 : !torch.bool
}

View File

@ -1249,3 +1249,21 @@ func.func @torch.aten.sqrt.int$no_fold(%arg0 : !torch.int) -> !torch.float {
%0 = torch.aten.sqrt.int %arg0 : !torch.int -> !torch.float
return %0 : !torch.float
}
// Fold test: with a constant operand, the AtenBoolFloatOp folder evaluates
// 1.5 != 0.0 at compile time, replacing the op with a bool constant.
// CHECK-LABEL: func.func @torch.aten.Bool.float$fold_cst() -> !torch.bool {
// CHECK: %[[CST2:.*]] = torch.constant.bool true
// CHECK: return %[[CST2]] : !torch.bool
func.func @torch.aten.Bool.float$fold_cst() -> !torch.bool {
%float = torch.constant.float 1.5
%1 = torch.aten.Bool.float %float : !torch.float -> !torch.bool
return %1 : !torch.bool
}
// Fold test: with a constant operand, the AtenBoolIntOp folder evaluates
// 2 != 0 at compile time, replacing the op with a bool constant.
// CHECK-LABEL: func.func @torch.aten.Bool.int$fold_cst() -> !torch.bool {
// CHECK: %[[CST2:.*]] = torch.constant.bool true
// CHECK: return %[[CST2]] : !torch.bool
func.func @torch.aten.Bool.int$fold_cst() -> !torch.bool {
%int = torch.constant.int 2
%1 = torch.aten.Bool.int %int : !torch.int -> !torch.bool
return %1 : !torch.bool
}