[E2E] add nan case in elementwise comparison e2e tests (#2575)

Yuanqiang Liu 2023-11-20 11:27:08 +08:00 committed by GitHub
parent 5eae0adff1
commit 7b94189e07
2 changed files with 35 additions and 7 deletions
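
Note on the change: the lowering code below switches the float comparison predicates from the unordered forms (UGT, UGE, ULT, ULE) to the ordered forms (OGT, OGE, OLT, OLE). An ordered arith.cmpf predicate is false whenever either operand is NaN, while an unordered one is true in that case, and PyTorch's eager comparisons return False for NaN, so the ordered predicates are the ones that match. A rough Python model of the distinction (an illustrative sketch, not the actual lowering code):

import math

def cmpf_ordered_gt(a: float, b: float) -> bool:
    # Ordered predicate (e.g. OGT): a NaN operand makes the result False.
    if math.isnan(a) or math.isnan(b):
        return False
    return a > b

def cmpf_unordered_gt(a: float, b: float) -> bool:
    # Unordered predicate (e.g. UGT): a NaN operand makes the result True.
    if math.isnan(a) or math.isnan(b):
        return True
    return a > b

print(cmpf_ordered_gt(3.1, math.nan))    # False -- agrees with torch.gt
print(cmpf_unordered_gt(3.1, math.nan))  # True  -- the old behavior, which eager PyTorch never produces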


@@ -57,7 +57,7 @@ static Value createComparisonTemplate(OpBuilder &b, Location loc, Type type,
 
 static Value createGreaterThan(OpBuilder &b, Location loc, Type elementalType,
                                Value lhs, Value rhs) {
-  return createComparisonTemplate<arith::CmpFPredicate::UGT,
+  return createComparisonTemplate<arith::CmpFPredicate::OGT,
                                   arith::CmpIPredicate::ugt,
                                   arith::CmpIPredicate::sgt>(
       b, loc, elementalType, lhs, rhs);
@@ -66,7 +66,7 @@ static Value createGreaterThan(OpBuilder &b, Location loc, Type elementalType,
 static Value createGreaterThanOrEqual(OpBuilder &b, Location loc,
                                       Type elementalType, Value lhs,
                                       Value rhs) {
-  return createComparisonTemplate<arith::CmpFPredicate::UGE,
+  return createComparisonTemplate<arith::CmpFPredicate::OGE,
                                   arith::CmpIPredicate::uge,
                                   arith::CmpIPredicate::sge>(
       b, loc, elementalType, lhs, rhs);
@@ -74,7 +74,7 @@ static Value createGreaterThanOrEqual(OpBuilder &b, Location loc,
 
 static Value createLessThan(OpBuilder &b, Location loc, Type elementalType,
                             Value lhs, Value rhs) {
-  return createComparisonTemplate<arith::CmpFPredicate::ULT,
+  return createComparisonTemplate<arith::CmpFPredicate::OLT,
                                   arith::CmpIPredicate::ult,
                                   arith::CmpIPredicate::slt>(
       b, loc, elementalType, lhs, rhs);
@@ -82,7 +82,7 @@ static Value createLessThan(OpBuilder &b, Location loc, Type elementalType,
 
 static Value createLessThanOrEqual(OpBuilder &b, Location loc,
                                    Type elementalType, Value lhs, Value rhs) {
-  return createComparisonTemplate<arith::CmpFPredicate::ULE,
+  return createComparisonTemplate<arith::CmpFPredicate::OLE,
                                   arith::CmpIPredicate::ule,
                                   arith::CmpIPredicate::sle>(
       b, loc, elementalType, lhs, rhs);
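
The e2e test changes below feed fixed NaN-containing tensors through the comparison modules instead of purely random inputs. For reference, eager PyTorch yields the following for the greater-than family on those inputs (a quick check to help read the diff, not part of the commit):

import torch

x = torch.tensor([[1.0, 2.2, torch.nan], [6.0, 2.0, 3.1]], dtype=torch.float32)
y = torch.tensor([6.0, 2.1, torch.nan], dtype=torch.float32)

print(torch.gt(x, y))  # [[False, True, False], [False, False, False]] -- every NaN position is False
print(torch.ge(x, y))  # [[False, True, False], [True, False, False]]  -- every NaN position is False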


@@ -160,7 +160,9 @@ class ElementwiseGeFloatTensorModule(torch.nn.Module):
 
 @register_test_case(module_factory=lambda: ElementwiseGeFloatTensorModule())
 def ElementwiseGeFloatTensorModule_basic(module, tu: TestUtils):
-    module.forward(tu.rand(3, 5), tu.rand(5))
+    module.forward(
+        torch.tensor([[1.0, 2.2, torch.nan], [6.0, 2.0, 3.1]]).to(torch.float32),
+        torch.tensor([6.0, 2.1, torch.nan]).to(torch.float32))
 
 
 # ==============================================================================
@@ -200,7 +202,9 @@ class ElementwiseGtFloatTensorModule(torch.nn.Module):
 
 @register_test_case(module_factory=lambda: ElementwiseGtFloatTensorModule())
 def ElementwiseGtFloatTensorModule_basic(module, tu: TestUtils):
-    module.forward(tu.rand(3, 5), tu.rand(5))
+    module.forward(
+        torch.tensor([[1.0, 2.2, torch.nan], [6.0, 2.0, 3.1]]).to(torch.float32),
+        torch.tensor([6.0, 2.1, torch.nan]).to(torch.float32))
 
 
 # ==============================================================================
@@ -378,6 +382,28 @@ def ElementwiseLeFloatTensorModule_basic(module, tu: TestUtils):
 
 
 # ==============================================================================
+class ElementwiseLeFloatTensorNanModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1], torch.float32, True),
+        ([-1], torch.float32, True),
+    ])
+    def forward(self, x, y):
+        return torch.le(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseLeFloatTensorNanModule())
+def ElementwiseLeFloatTensorNanModule_basic(module, tu: TestUtils):
+    module.forward(
+        torch.tensor([[1.0, 2.2, torch.nan], [6.0, 2.0, 3.1]]).to(torch.float32),
+        torch.tensor([6.0, 2.1, torch.nan]).to(torch.float32))
+
+
+# ==============================================================================
 class ElementwiseLeIntTensorModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -414,7 +440,9 @@ class ElementwiseLtFloatTensorModule(torch.nn.Module):
 
 @register_test_case(module_factory=lambda: ElementwiseLtFloatTensorModule())
 def ElementwiseLtFloatTensorModule_basic(module, tu: TestUtils):
-    module.forward(tu.rand(3, 5), tu.rand(5))
+    module.forward(
+        torch.tensor([[1.0, 2.2, torch.nan], [6.0, 2.0, 3.1]]).to(torch.float32),
+        torch.tensor([6.0, 2.1, torch.nan]).to(torch.float32))
 
 
 # ==============================================================================
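
The new ElementwiseLeFloatTensorNanModule and the updated less-than test use the same tensors; their eager reference values are below. Under the old unordered predicates the lowered result would have been True at the NaN positions, so these cases now fail loudly if the predicates regress (again, a quick eager-mode check, not part of the commit):

import torch

x = torch.tensor([[1.0, 2.2, torch.nan], [6.0, 2.0, 3.1]], dtype=torch.float32)
y = torch.tensor([6.0, 2.1, torch.nan], dtype=torch.float32)

print(torch.le(x, y))  # [[True, False, False], [True, True, False]]  -- NaN positions are False
print(torch.lt(x, y))  # [[True, False, False], [False, True, False]] -- NaN positions are False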