[torch-mlir][NFC] remove trailing whitespace (#2936)

Aart Bik 2024-02-20 11:23:14 -08:00 committed by GitHub
parent 13113df33e
commit 534b266f2d
10 changed files with 38 additions and 38 deletions


@@ -64,7 +64,7 @@ def run_pipeline_with_repro_report(module,
{sys.stderr.getvalue()}
python exception: {e}
For Torch-MLIR developers, the error can be reproduced with:
$ torch-mlir-opt -pass-pipeline='{pipeline}' {filename}
Add '{debug_options}' to get the IR dump for debugging purpose.


@@ -19,12 +19,12 @@ warnings.filterwarnings("ignore", module="torch.jit._check")
def _get_decomposition_table():
"""Get a decomposition table suitable for Torch-MLIR.
Sometimes TorchDynamo traces slightly different ops than what TorchScript
captures. Historically we have been driven by the ops captured by
TorchScript, so we try to decompose the ops captured by TorchDynamo into
other ops that we already support.
There isn't a highly principled solution here. Torch-MLIR currently supports
a somewhat random set of ops, added in a demand-driven way over time,
including direct backend support and decompositions internal to Torch-MLIR.
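
As a rough illustration of the decomposition idea above, upstream PyTorch already ships a registry of decompositions that can be turned into such a table. A minimal sketch, assuming torch._decomp is available; the two ops listed are illustrative, not the set Torch-MLIR actually registers:

# Hypothetical sketch: assemble a decomposition table from upstream PyTorch's
# registry. The op list below is an example, not Torch-MLIR's actual selection.
import torch
from torch._decomp import get_decompositions

def example_decomposition_table():
    # Maps each selected aten op to a Python function that rewrites it
    # into simpler ops.
    return get_decompositions([
        torch.ops.aten.native_layer_norm,
        torch.ops.aten.addmm,
    ])

table = example_decomposition_table()
print(len(table))  # number of (op -> decomposition) entries found
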


@@ -81,7 +81,7 @@ def aten〇diagonal〡shape(self: List[int], offset: int = 0, dim1: int = 0, dim
pass
else:
diagonal.append(self_dim)
diag_size = max(min(self[dim1], self[dim2] - offset), 0)
if offset<0:
diag_size = max(min(self[dim1] + offset, self[dim2]), 0)
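
A quick numeric check of the offset < 0 branch above, assuming a local PyTorch install (shapes are arbitrary):

# For a 3x5 input with offset=-1 and the default dim1=0, dim2=1:
#   diag_size = max(min(self[dim1] + offset, self[dim2]), 0)
#             = max(min(3 - 1, 5), 0) = 2
import torch
assert torch.diagonal(torch.empty(3, 5), offset=-1).shape == torch.Size([2])
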
@@ -295,10 +295,10 @@ def prims〇collapse〡shape(a: List[int], start: int, end: int) -> List[int]:
assert end >= 0, "end out of bounds"
assert start <= end, "start must be less than or equal to end"
# Examples:
#
# torch._prims.collapse(torch.empty(2,3,4), 1,2).shape
# is
# torch.Size([2, 12])
#
# torch._prims.collapse(torch.empty(2,3,4), 1,3).shape
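
The arithmetic behind the example above can be sketched in plain Python; collapse_shape here is a hypothetical helper that mirrors the rule, not the shape function itself:

# Collapsing dims start..end (inclusive) multiplies their sizes together,
# e.g. collapse_shape([2, 3, 4], 1, 2) -> [2, 3 * 4] = [2, 12].
import math

def collapse_shape(shape, start, end):
    assert 0 <= start <= end < len(shape), "start/end out of bounds"
    return shape[:start] + [math.prod(shape[start:end + 1])] + shape[end + 1:]

assert collapse_shape([2, 3, 4], 1, 2) == [2, 12]
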
@@ -592,7 +592,7 @@ def aten〇pixel_shuffle〡shape(self: List[int], upscale_factor: int) -> List[i
assert len(self) >= 3, "input must be at least rank-3 in pixel_shuffle"
upscale_factor_squared = upscale_factor * upscale_factor
assert self[-3] % (upscale_factor_squared) == 0, "number of input channels must be divisible by upscale_factor^2 in pixel_shuffle"
out = self[0:-3]
out.append(self[-3] // upscale_factor_squared)
out.append(self[-2] * upscale_factor)
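
A quick eager-mode check of the pixel_shuffle rule above, assuming a local torch install (shapes chosen arbitrarily):

# pixel_shuffle folds channel blocks into space: (N, C*r*r, H, W) -> (N, C, H*r, W*r).
# With r=2, (1, 8, 4, 4) becomes (1, 2, 8, 8).
import torch
import torch.nn.functional as F
assert F.pixel_shuffle(torch.empty(1, 8, 4, 4), upscale_factor=2).shape == torch.Size([1, 2, 8, 8])
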
@@ -756,7 +756,7 @@ def _max_pool3d(
assert (
len(stride) == 0 or len(stride) == 1 or len(stride) == 3
), "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints"
if len(stride) == 0:
(dD, dH, dW) = (kD, kD, kD)
elif len(stride) == 1:
@@ -808,14 +808,14 @@ def _max_pool3d(
return [nInputPlane, outputDepth, outputHeight, outputWidth]
else:
return [nbatch, nInputPlane, outputDepth, outputHeight, outputWidth]
def aten〇max_pool2d〡shape(self: List[int], kernel_size: List[int], stride: List[int] = (), padding: List[int] = (0, 0,), dilation: List[int] = (1, 1,), ceil_mode: bool = False) -> List[int]:
return upstream_shape_functions.max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode)
@check_shape_function([
Invocation(TensorOfShape(3, 6, 10, 10, 10), [2]), # Basic using defaults
Invocation(TensorOfShape(3, 6, 10, 10, 10), [4], [2], [2], [2]), # Using single values for each parameter
Invocation(TensorOfShape(3, 6, 64, 64, 64), [4, 6, 8], [2, 4, 2], [1, 2, 4], [1, 2, 4]), # Using dimensions should be
ErrorInvocation(TensorOfShape(3, 6, 2, 2, 2), [4]), # Input is too small
ErrorInvocation(TensorOfShape(3, 6, 10, 10, 10), [4], [2], [4], [2]), # The following relationship between kernel and padding needs to apply: Kernel size >= 2 * padding size
])
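
As a rough eager-mode counterpart of the first two invocations above (a sketch, assuming a local torch install):

# With stride omitted it defaults to the kernel size, so a 10^3 volume with
# kernel 2 pools down to 5^3. With kernel 4, stride 2, padding 2, dilation 2
# the output side is floor((10 + 2*2 - 2*(4-1) - 1) / 2) + 1 = 4.
import torch
import torch.nn.functional as F
x = torch.empty(3, 6, 10, 10, 10)
assert F.max_pool3d(x, kernel_size=2).shape == torch.Size([3, 6, 5, 5, 5])
assert F.max_pool3d(x, kernel_size=4, stride=2, padding=2, dilation=2).shape == torch.Size([3, 6, 4, 4, 4])
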
@@ -1374,15 +1374,15 @@ def aten〇conv_tbc〡shape(self: List[int], weight: List[int], bias: List[int],
assert channels == channels_w
# the out_channels in weights and biases should also match, but this assert doesn't work because typing problems
# assert out_channels == out_channels_b
self_bct = [batch, channels, time]
weight_bct = [out_channels, channels, kernel_width]
bias_bct = bias
# use existing shape inf
output_size_bct = upstream_shape_functions.conv_forwards(self, weight, bias, stride=[1], padding=[pad], dilation=[], transposed=False, output_padding=[], groups=1)
batch_out, channels_out, time_out = output_size_bct
# bct -> tbc
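
For reference, the eager op with the same shapes used by the ConvTbcModule test further down (time=9, batch=4, channels=5; kernel_width=3, out_channels=6), assuming a local torch install:

# conv_tbc takes (time, batch, channels) input; with kernel_width=3 and the
# default pad=0 the output time is 9 - 3 + 1 = 7, giving (7, 4, 6).
import torch
out = torch.conv_tbc(torch.rand(9, 4, 5), torch.rand(3, 5, 6), torch.rand(6))
assert out.shape == torch.Size([7, 4, 6])
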
@@ -1544,7 +1544,7 @@ def aten〇replication_pad2d〡shape(self: List[int], padding: List[int]) -> Lis
return pad_shape_fn(self, padding)
def aten〇replication_pad2d〡dtype(self_rank_dtype: Tuple[int, int], padding: List[int]) -> int:
self_rank, self_dtype = self_rank_dtype
return self_dtype
def aten〇pad〡shape(self: List[int], pad: List[int], mode: str = "constant", value: Optional[float] = None) -> List[int]:
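
A quick shape/dtype check for the replication_pad2d rules in the hunk above, assuming a local torch install (padding is given as left, right, top, bottom; values are arbitrary):

# Width grows by left+right and height by top+bottom, and the dtype passes
# through: (1, 1, 4, 4) padded by (1, 2, 3, 4) -> (1, 1, 11, 7).
import torch
import torch.nn.functional as F
x = torch.empty(1, 1, 4, 4, dtype=torch.float32)
out = F.pad(x, (1, 2, 3, 4), mode="replicate")
assert out.shape == torch.Size([1, 1, 11, 7])
assert out.dtype == x.dtype
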
@@ -3618,7 +3618,7 @@ def aten〇where〇ScalarSelf〡dtype(condition_rank_dtype: Tuple[int, int], sel
@check_dtype_function(
_check_tensors_with_the_same_dtype(num_of_tensors=1))
def aten〇nan_to_num〡dtype(self_rank_dtype: Tuple[int, int], nan: Optional[float] = None, posinf: Optional[float] = None, neginf: Optional[float] = None) -> int:
self_rank, self_dtype = self_rank_dtype
return self_dtype
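
The dtype rule above simply forwards the input dtype; a one-line eager check (values arbitrary):

# nan_to_num replaces values but never changes the element type.
import torch
x = torch.tensor([float("nan"), float("inf"), -float("inf")], dtype=torch.float16)
assert torch.nan_to_num(x, nan=0.0, posinf=1.0, neginf=-1.0).dtype == torch.float16
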
@check_dtype_function(
@@ -4258,7 +4258,7 @@ def aten〇cat〡dtype(tensors_rank_dtype: List[Tuple[int, int]], dim: int = 0)
return promote_dtypes(ranks, dtypes)
@check_dtype_function(
[Invocation("i,j->ij", [TensorOfShape(1, dtype=torch.float32),
[Invocation("i,j->ij", [TensorOfShape(1, dtype=torch.float32),
TensorOfShape(1, dtype=torch.int32)]),])
def aten〇einsum〡dtype(equation: str, tensors_rank_dtype: List[Tuple[int, int]], path: Optional[List[int]] = None) -> int:
ranks: List[Optional[int]] = []
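
The einsum dtype rule comes down to ordinary type promotion over the operand dtypes; the promotion itself can be checked directly (a small sketch, independent of the shape-library helpers):

# float32 combined with int32 promotes to float32, which is what the
# check_dtype_function invocation above expects for the "i,j->ij" case.
import torch
assert torch.promote_types(torch.float32, torch.int32) == torch.float32
assert torch.result_type(torch.ones(1, dtype=torch.float32), torch.ones(1, dtype=torch.int32)) == torch.float32
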


@@ -324,7 +324,7 @@ def compile_and_run_test(test: Test, config: TestConfig, verbose=False) -> Any:
def run_tests(tests: List[Test], config: TestConfig, sequential=False, verbose=False) -> List[TestResult]:
"""Invoke the given `Test`'s with the provided `TestConfig`."""
"""Invoke the given `Test`'s with the provided `TestConfig`."""
num_processes = min(int(mp.cpu_count() * 0.8) + 1, len(tests))
try:
env_concurrency = int(os.getenv("TORCH_MLIR_TEST_CONCURRENCY", "0"))
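
A small sketch of how the worker count above is derived; the helper name is made up, and treating TORCH_MLIR_TEST_CONCURRENCY as a cap on the default (rather than a hard override) is an assumption, since the hunk is cut off before that decision:

# Default to roughly 80% of the CPUs plus one, never more workers than tests,
# and let TORCH_MLIR_TEST_CONCURRENCY tighten that bound when set (assumed).
import multiprocessing as mp
import os

def pick_num_processes(num_tests: int) -> int:
    num_processes = min(int(mp.cpu_count() * 0.8) + 1, num_tests)
    env_concurrency = int(os.getenv("TORCH_MLIR_TEST_CONCURRENCY", "0"))
    if env_concurrency > 0:
        num_processes = min(num_processes, env_concurrency)
    return num_processes

print(pick_num_processes(100))
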


@@ -49,7 +49,7 @@ class LinalgOnTensorsOnnxBackend(OnnxBackend):
imported_module,
f"builtin.module(func.func({ONNX_TO_TORCH_FUNC_PIPELINE}))",
"Lowering Onnx backend contract to Linalg-on-Tensors backend contract")
run_pipeline_with_repro_report(
imported_module,
f"builtin.module(torch-lower-to-backend-contract)",


@@ -853,7 +853,7 @@ class ConvTbcModule(torch.nn.Module):
])
def forward(self, x, weight, bias):
return torch.conv_tbc(x, weight, bias)
@register_test_case(module_factory=lambda: ConvTbcModule())
def ConvTbcModule_basic(module, tu: TestUtils):
module.forward(tu.rand(9, 4, 5), tu.rand(3, 5, 6), tu.rand(6))


@@ -94,7 +94,7 @@ class DiagonalWithOffsetModule(torch.nn.Module):
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, a):


@@ -3833,7 +3833,7 @@ class ElementwiseAtenIsposinfOpModule(torch.nn.Module):
([2, 5], torch.float32, True),
])
def forward(self, x):
return torch.ops.aten.isposinf(x)
@register_test_case(module_factory=lambda: ElementwiseAtenIsposinfOpModule())
def ElementwiseAtenIsposinfOpModule_basic(module, tu:TestUtils):


@@ -945,7 +945,7 @@ def AvgPool1dStaticModule_basic(module, tu: TestUtils):
# ==============================================================================
class AdaptiveAvgPool1dStaticLargerOutput(torch.nn.Module):
def __init__(self):
super().__init__()
self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=13)
@@ -965,7 +965,7 @@ def AdaptiveAvgPool1dStaticLargerOutput_basic(
module.forward(tu.rand(5, 512, 7))
class AdaptiveAvgPool1dStaticEvenMultiple(torch.nn.Module):
def __init__(self):
super().__init__()
self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7)
@@ -985,7 +985,7 @@ def AdaptiveAvgPool1dStaticEvenMultiple_basic(
module.forward(tu.rand(5, 512, 147))
class AdaptiveAvgPool1dGeneralDynamic(torch.nn.Module):
def __init__(self):
super().__init__()
self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7)
@@ -1085,7 +1085,7 @@ def AdaptiveAvgPool1dUnitOutputSizeDynamicModule_basic(
module.forward(tu.rand(1, 512, 7))
class AdaptiveMaxPool2dDynamic(torch.nn.Module):
def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=False)
@@ -1105,7 +1105,7 @@ def AdaptiveMaxPool2dDynamic_basic(
module.forward(tu.rand(1, 512, 10, 16))
class AdaptiveMaxPool2dDynamicWithIndices(torch.nn.Module):
def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=True)
@@ -1123,10 +1123,10 @@ class AdaptiveMaxPool2dDynamicWithIndices(torch.nn.Module):
def AdaptiveMaxPool2dDynamicWithIndices_basic(
module, tu: TestUtils):
module.forward(tu.rand(1, 512, 10, 16))
class AdaptiveMaxPool2dStatic(torch.nn.Module):
def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=False)
@@ -1146,7 +1146,7 @@ def AdaptiveMaxPool2dStatic_basic(
module.forward(tu.rand(1, 512, 10, 9))
class AdaptiveMaxPool2dStaticWithIndices(torch.nn.Module):
def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=True)


@@ -327,13 +327,13 @@ class ReduceAllDimEmpty(torch.nn.Module):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=0, keepdim=False)
@register_test_case(module_factory=lambda: ReduceAllDimEmpty())
def ReduceAllDimEmpty_basic(module, tu: TestUtils):
module.forward(torch.tensor([]))
# ==============================================================================
class ReduceAllDimFloat(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -345,13 +345,13 @@ class ReduceAllDimFloat(torch.nn.Module):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=1, keepdim=True)
@register_test_case(module_factory=lambda: ReduceAllDimFloat())
def ReduceAllDimFloat_basic(module, tu: TestUtils):
module.forward(torch.tensor([[5.0,1e-6,-5.0],[0,5.0,0]]))
# ==============================================================================
class ReduceAllDimInt(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -363,13 +363,13 @@ class ReduceAllDimInt(torch.nn.Module):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=1, keepdim=True)
@register_test_case(module_factory=lambda: ReduceAllDimInt())
def ReduceAllDimInt_basic(module, tu: TestUtils):
module.forward(torch.tensor([[5,-5,0],[5,1e10,5]]).to(torch.int32))
# ==============================================================================
class ReduceAllDimBool(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -381,13 +381,13 @@ class ReduceAllDimBool(torch.nn.Module):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=1, keepdim=False)
@register_test_case(module_factory=lambda: ReduceAllDimBool())
def ReduceAllDimBool_basic(module, tu: TestUtils):
module.forward(torch.tensor([[True, False, True], [True, True, True]]))
# ==============================================================================
class ReduceMaxAlongDim(torch.nn.Module):
def __init__(self):
super().__init__()