[torch-mlir] remove trailing whitespace from e2e test files (#2727)

pull/2728/head
Aart Bik, 2024-01-04 14:09:12 -08:00, committed by GitHub
parent 4e5e34d215
commit aa7e95f7c8
12 changed files with 42 additions and 42 deletions
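
The change itself is purely mechanical. As a rough illustration only (the directory path is an assumption about the repository layout, not something taken from this commit), a cleanup of this kind could be reproduced with a short script:

# Hypothetical cleanup sketch, not the tool used for this commit:
# strip trailing whitespace from every e2e test file in place.
import pathlib

# Assumed location of the e2e test suite inside a torch-mlir checkout.
test_suite = pathlib.Path("projects/pt1/python/torch_mlir_e2e_test/test_suite")

for path in sorted(test_suite.rglob("*.py")):
    original = path.read_text()
    cleaned = "\n".join(line.rstrip() for line in original.splitlines()) + "\n"
    if cleaned != original:
        path.write_text(cleaned)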


@@ -254,7 +254,7 @@ def ArangeFalsePinMemoryModule_basic(module, tu: TestUtils):
class ArangeStartOutModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,
@@ -270,7 +270,7 @@ def ArangeStartOutModule_basic(module, tu: TestUtils):
class ArangeStartOutViewModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,
@@ -286,7 +286,7 @@ def ArangeStartOutViewModule_basic(module, tu: TestUtils):
class ArangeStartOutDtypeModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,


@@ -13,10 +13,10 @@ from torch_mlir_e2e_test.annotations import annotate_args, export
# ==============================================================================
class ScalarConstantTupleModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,
@@ -4490,7 +4490,7 @@ class OneHotModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([None, ([-1], torch.long, True)])
    def forward(self, x):
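
For context, every file touched by this commit follows the same e2e test pattern that the fragments above and below excerpt: a torch.nn.Module subclass whose forward is annotated with @export and @annotate_args, plus a @register_test_case function that drives it. A representative module (illustrative only; the framework and registry import paths are assumptions based on the annotations import shown above) looks like:

import torch

from torch_mlir_e2e_test.framework import TestUtils
from torch_mlir_e2e_test.registry import register_test_case
from torch_mlir_e2e_test.annotations import annotate_args, export


class ExampleAbsModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,                             # placeholder for self
        ([-1, -1], torch.float32, True),  # one dynamically shaped float input
    ])
    def forward(self, x):
        return torch.ops.aten.abs(x)


@register_test_case(module_factory=lambda: ExampleAbsModule())
def ExampleAbsModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(3, 4))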


@@ -28,7 +28,7 @@ class TorchPrimLoopForLikeModule(torch.nn.Module):
        for i in range(x_val):
            sum += i
        return sum
@register_test_case(module_factory=lambda: TorchPrimLoopForLikeModule())
def TorchPrimLoopForLikeModule_basic(module, tu: TestUtils):
@@ -50,7 +50,7 @@ class TorchPrimLoopWhileLikeModule(torch.nn.Module):
        while(x_val > sum):
            sum += 1
        return sum
@register_test_case(module_factory=lambda: TorchPrimLoopWhileLikeModule())
def TorchPrimLoopWhileLikeModule_basic(module, tu: TestUtils):


@@ -3184,7 +3184,7 @@ def ElementwiseAtenLogicalOrOpRandomFloatModule_basic(module, tu: TestUtils):
class ElementwiseAtenLogicalOrOpNegativeModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,
@@ -3203,7 +3203,7 @@ def ElementwiseAtenLogicalOrOpNegativeModule_basic(module, tu: TestUtils):
class ElementwiseAtenLogicalOrOpBrodcastModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,
@@ -4089,7 +4089,7 @@ def ElementwiseBitwiseAndScalarInt8Module_basic(module, tu: TestUtils):
class GluStaticModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,


@@ -779,7 +779,7 @@ class AllBoolFalseModule(torch.nn.Module):
    def forward(self):
        input = [True, False, True, True, False]
        return torch.ops.aten.all(input)
@register_test_case(module_factory=lambda: AllBoolFalseModule())
def AllBoolFalseModule_basic(module, tu: TestUtils):
    module.forward()


@@ -28,7 +28,7 @@ class MatmulDot(torch.nn.Module):
@register_test_case(module_factory=lambda: MatmulDot())
def Matmul_dot(module, tu: TestUtils):
    module.forward(tu.rand(3), tu.rand(3))
# ==============================================================================
class Matmul2D(torch.nn.Module):
@@ -48,7 +48,7 @@ class Matmul2D(torch.nn.Module):
@register_test_case(module_factory=lambda: Matmul2D())
def Matmul_2d(module, tu: TestUtils):
    module.forward(tu.rand(3, 4), tu.rand(4, 5))
# ==============================================================================
class MatmulVecMat(torch.nn.Module):
@@ -68,7 +68,7 @@ class MatmulVecMat(torch.nn.Module):
@register_test_case(module_factory=lambda: MatmulVecMat())
def Matmul_vecmat(module, tu: TestUtils):
    module.forward(tu.rand(4), tu.rand(4, 5))
# ==============================================================================
class MatmulMatVec(torch.nn.Module):
@@ -88,7 +88,7 @@ class MatmulMatVec(torch.nn.Module):
@register_test_case(module_factory=lambda: MatmulMatVec())
def Matmul_matvec(module, tu: TestUtils):
    module.forward(tu.rand(4, 5), tu.rand(5))
# ==============================================================================
class Matmul3D(torch.nn.Module):
@@ -108,7 +108,7 @@ class Matmul3D(torch.nn.Module):
@register_test_case(module_factory=lambda: Matmul3D())
def Matmul_3d(module, tu: TestUtils):
    module.forward(tu.rand(3, 4, 5), tu.rand(3, 5, 4))
# ==============================================================================
class Matmul4d(torch.nn.Module):
@@ -128,7 +128,7 @@ class Matmul4d(torch.nn.Module):
@register_test_case(module_factory=lambda: Matmul4d())
def Matmul_4d(module, tu: TestUtils):
    module.forward(tu.rand(4, 5, 6, 7), tu.rand(4, 5, 7, 6))
# ==============================================================================
class Matmul4dStatic(torch.nn.Module):
@@ -188,7 +188,7 @@ class MatmulSingleDynamicBatchDim(torch.nn.Module):
@register_test_case(module_factory=lambda: MatmulSingleDynamicBatchDim())
def MatmulSingleDynamicBatchDim_basic(module, tu: TestUtils):
    module.forward(tu.rand(4, 5, 6, 7), tu.rand(4, 5, 7, 6))
# ==============================================================================
class MatmulBroadcastBatchDim(torch.nn.Module):
@@ -208,7 +208,7 @@ class MatmulBroadcastBatchDim(torch.nn.Module):
@register_test_case(module_factory=lambda: MatmulBroadcastBatchDim())
def MatmulBroadcastBatchDim_basic(module, tu: TestUtils):
    module.forward(tu.rand(4, 5, 6, 7), tu.rand(5, 7, 6))
# ==============================================================================
class Mv(torch.nn.Module):


@@ -130,7 +130,7 @@ class BatchNorm1DStaticShapeModule(torch.nn.Module):
    ])
    def forward(self, x, weight, bias, running_mean, running_var):
        return torch.ops.aten.batch_norm(
-            x, weight, bias, running_mean, running_var, training=False,
+            x, weight, bias, running_mean, running_var, training=False,
            momentum=0.1, eps=0.00001, cudnn_enabled=False)
@@ -156,7 +156,7 @@ class NativeBatchNorm1DModule(torch.nn.Module):
    ])
    def forward(self, x, weight, bias, running_mean, running_var):
        return torch.ops.aten.native_batch_norm(
-            x, weight, bias, running_mean, running_var, training=False,
+            x, weight, bias, running_mean, running_var, training=False,
            momentum=0.1, eps=0.00001)
@@ -182,7 +182,7 @@ class NativeBatchNorm2DModule(torch.nn.Module):
    ])
    def forward(self, x, weight, bias, running_mean, running_var):
        return torch.ops.aten.native_batch_norm(
-            x, weight, bias, running_mean, running_var, training=False,
+            x, weight, bias, running_mean, running_var, training=False,
            momentum=0.1, eps=0.00001)
@@ -208,7 +208,7 @@ class NativeBatchNorm3DModule(torch.nn.Module):
    ])
    def forward(self, x, weight, bias, running_mean, running_var):
        return torch.ops.aten.native_batch_norm(
-            x, weight, bias, running_mean, running_var, training=False,
+            x, weight, bias, running_mean, running_var, training=False,
            momentum=0.1, eps=0.00001)
@@ -233,7 +233,7 @@ class NativeBatchNormNoneWeightModule(torch.nn.Module):
    ])
    def forward(self, x, bias, running_mean, running_var):
        return torch.ops.aten.native_batch_norm(
-            x, None, bias, running_mean, running_var, training=False,
+            x, None, bias, running_mean, running_var, training=False,
            momentum=0.1, eps=0.00001)


@@ -826,7 +826,7 @@ class ArgminKeepDimsModule(torch.nn.Module):
    @export
    @annotate_args([
-        None,
+        None,
        ([-1, -1], torch.float32, True),
    ])
    def forward(self, a):
@@ -908,7 +908,7 @@ class ArgmaxKeepDimsModule(torch.nn.Module):
    @export
    @annotate_args([
-        None,
+        None,
        ([-1, -1], torch.float32, True),
    ])
    def forward(self, a):
@@ -1068,8 +1068,8 @@ def NormScalarOptDimKeepDimModule_basic(module, tu: TestUtils):
class ReduceFrobeniusNormModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
-    @export
+    @export
    @annotate_args([
        None,
        ([-1, -1, -1], torch.float32, True),
@@ -1086,8 +1086,8 @@ def ReduceFrobeniusNormModule_basic(module, tu: TestUtils):
class ReduceFrobeniusNormKeepDimModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
-    @export
+    @export
    @annotate_args([
        None,
        ([-1, -1, -1], torch.float32, True),
@@ -1104,8 +1104,8 @@ def ReduceFrobeniusNormKeepDimModule_basic(module, tu: TestUtils):
class LinalgVectorNormModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
-    @export
+    @export
    @annotate_args([
        None,
        ([-1, -1, -1], torch.float32, True),
@@ -1122,8 +1122,8 @@ def LinalgVectorNormModule_basic(module, tu: TestUtils):
class LinalgVectorNormKeepDimModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
-    @export
+    @export
    @annotate_args([
        None,
        ([-1, -1, -1], torch.float32, True),


@@ -708,8 +708,8 @@ def UnsafeView1DFoldModule_basic(module, tu: TestUtils):
class ReshapeAsModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
-    @export
+    @export
    @annotate_args([
        None,
        ([4, 3], torch.float32, True),


@@ -456,7 +456,7 @@ class NarrowHorizontalTest(torch.nn.Module):
    ])
    def forward(self, x):
        return torch.ops.aten.narrow(x, dim=0, start=0, length=2)
@register_test_case(module_factory=lambda: NarrowHorizontalTest())
def NarrowHorizontalTest_basic(module, tu: TestUtils):
@@ -495,7 +495,7 @@ class NarrowHorizontalTest2(torch.nn.Module):
    ])
    def forward(self, x):
        return torch.ops.aten.narrow(x, dim=0, start=0, length=2)
@register_test_case(module_factory=lambda: NarrowHorizontalTest2())
def NarrowHorizontalTest2_basic(module, tu: TestUtils):
@@ -738,7 +738,7 @@ def SplitTensorGetItem_Module_basic(module, tu: TestUtils):
class SplitTensorListUnpackModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
    @export
    @annotate_args([
        None,


@@ -96,7 +96,7 @@ class SqueezeDimStaticModule(torch.nn.Module):
    module_factory=lambda: SqueezeDimStaticModule())
def SqueezeDimModule_static(module, tu: TestUtils):
    module.forward(tu.rand(1, 7))
# ==============================================================================


@@ -275,7 +275,7 @@ class TypeAsDifferentModule(torch.nn.Module):
@register_test_case(module_factory=lambda: TypeAsDifferentModule())
def TypeAsDifferentModule_basic(module, tu: TestUtils):
    module.forward(
-        tu.randint(3, 5, low=0, high=10, dtype=torch.int),
+        tu.randint(3, 5, low=0, high=10, dtype=torch.int),
        tu.randint(3, 5, low=0, high=10, dtype=torch.int64)
    )