[Dynamo] turn on `no_python=True` for dynamo tests (#2040)

Maksim Levental 2023-04-28 18:05:17 -05:00 committed by GitHub
parent 61a8142d23
commit c9fba95642
4 changed files with 168 additions and 13 deletions
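For context on what this change does: `dynamo.optimize(..., nopython=True)` makes TorchDynamo raise `torch._dynamo.exc.Unsupported` on any graph break instead of silently falling back to eager Python, which is what lets the e2e suite actually detect unsupported constructs. A minimal sketch (not part of this patch; the toy backend and function are illustrative only):

import torch
import torch._dynamo as dynamo

def toy_backend(gm: torch.fx.GraphModule, example_inputs):
    # A do-nothing backend: just run the captured FX graph.
    return gm.forward

def f(x):
    # bool(tensor) is data-dependent and forces a graph break.
    if bool(x.sum() > 0):
        return x + 1
    return x - 1

x = torch.ones(3)
# Default mode: the graph break silently falls back to eager Python.
print(dynamo.optimize(toy_backend)(f)(x))

# nopython=True: the same graph break now surfaces as
# torch._dynamo.exc.Unsupported, so the test harness can see it.
try:
    dynamo.optimize(toy_backend, nopython=True)(f)(x)
except Exception as e:
    print(type(e).__name__, ":", e)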


@@ -27,7 +27,14 @@ from torch_mlir_e2e_test.linalg_on_tensors_backends.refbackend import RefBackendLinalgOnTensorsBackend
 from torch_mlir_e2e_test.stablehlo_backends.linalg_on_tensors import LinalgOnTensorsStablehloBackend
 from torch_mlir_e2e_test.tosa_backends.linalg_on_tensors import LinalgOnTensorsTosaBackend
-from .xfail_sets import LINALG_XFAIL_SET, STABLEHLO_PASS_SET, TOSA_PASS_SET, LTC_XFAIL_SET, TORCHDYNAMO_XFAIL_SET
+from .xfail_sets import (
+    LINALG_XFAIL_SET,
+    STABLEHLO_PASS_SET,
+    TOSA_PASS_SET,
+    LTC_XFAIL_SET,
+    TORCHDYNAMO_XFAIL_SET,
+    TORCHDYNAMO_CRASHING_SET
+)

 # Import tests to register them in the global registry.
 from torch_mlir_e2e_test.test_suite import register_all_tests

@@ -77,26 +84,33 @@ def main():
     if args.config == "linalg":
         config = LinalgOnTensorsBackendTestConfig(RefBackendLinalgOnTensorsBackend())
         xfail_set = LINALG_XFAIL_SET
-    if args.config == "tosa":
+        crashing_set = set()
+    elif args.config == "tosa":
         config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
         xfail_set = all_test_unique_names - TOSA_PASS_SET
-    if args.config == "stablehlo":
+        crashing_set = set()
+    elif args.config == "stablehlo":
         config = StablehloBackendTestConfig(LinalgOnTensorsStablehloBackend())
         xfail_set = all_test_unique_names - STABLEHLO_PASS_SET
+        crashing_set = set()
     elif args.config == "native_torch":
         config = NativeTorchTestConfig()
-        xfail_set = {}
+        xfail_set = set()
+        crashing_set = set()
     elif args.config == "torchscript":
         config = TorchScriptTestConfig()
-        xfail_set = {}
+        xfail_set = set()
+        crashing_set = set()
     elif args.config == "lazy_tensor_core":
         config = LazyTensorCoreTestConfig()
         xfail_set = LTC_XFAIL_SET
+        crashing_set = set()
     elif args.config == "torchdynamo":
         config = TorchDynamoTestConfig()
         xfail_set = TORCHDYNAMO_XFAIL_SET
+        crashing_set = TORCHDYNAMO_CRASHING_SET

-    do_not_attempt = set(args.crashing_tests_to_not_attempt_to_run_and_a_bug_is_filed or [])
+    do_not_attempt = set(args.crashing_tests_to_not_attempt_to_run_and_a_bug_is_filed or []).union(crashing_set)
     available_tests = [test for test in GLOBAL_TEST_REGISTRY if test.unique_name not in do_not_attempt]
     if args.crashing_tests_to_not_attempt_to_run_and_a_bug_is_filed is not None:
         for arg in args.crashing_tests_to_not_attempt_to_run_and_a_bug_is_filed:
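Why a separate crashing set rather than more XFAIL entries: a test that crashes the CPython process cannot be recorded as an expected failure, so it has to be filtered out before the runner ever attempts it. A hedged sketch of the merge-and-filter behavior above (the test names and CLI value are illustrative only):

# The per-config crashing set is unioned with any names the user passes
# via the flag parsed into args.crashing_tests_to_not_attempt_to_run_and_a_bug_is_filed.
crashing_set = {"ExpandModule_basic"}       # from xfail_sets.py
cli_names = ["SliceModule_basic"]           # hypothetical CLI input
do_not_attempt = set(cli_names or []).union(crashing_set)

registry = ["ExpandModule_basic", "SliceModule_basic", "AddIntModule_basic"]
available = [t for t in registry if t not in do_not_attempt]
print(available)  # ['AddIntModule_basic']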


@@ -98,6 +98,136 @@ TORCHDYNAMO_XFAIL_SET = {
     # ERROR: RuntimeError: Found a custom (non-ATen) operator that either mutates or aliases its inputs: prims::view_of. Getting these operators to work with functionalization requires some extra work. For mutable ops you need to register a corresponding out-of-place variant of the op, and you also need to register a Functionalization kernel that performs some boilerplate, telling functionalization to map from the mutable op to the out-of-place op. See a more complete example of how to do this at https://gist.github.com/bdhirsh/7dadbf6296f8f7d1abcf4c482f438aaa.
     "PrimsViewOfModule_basic",
     "PrimsViewOfZeroRankModule_basic",
+
+    # See https://github.com/llvm/torch-mlir/pull/2040 and corresponding upstream issue
+    # https://github.com/pytorch/pytorch/issues/99752.
+    # torch._dynamo.exc.Unsupported: call_function BuiltinVariable(bool) [TensorVariable()] {}
+    'TensorToBoolZeroRank_basic',
+    'TensorToBool_basic',
+
+    # torch._dynamo.exc.Unsupported: call_function BuiltinVariable(float) [TensorVariable()] {}
+    'AtenSubFloatModule_basic',
+    'BoolFloatFalseModule_basic',
+    'BoolFloatTrueModule_basic',
+    'CeilFloatModule_basic',
+    'DivFloatModule_basic',
+    'GeFloatIntModule_basic',
+    'GeFloatModule_basic',
+    'GtFloatIntModule_basic',
+    'NeFloatIntModule_basic',
+    'SubFloatModule_basic',
+    'TensorToFloatZeroRank_basic',
+    'TensorToFloat_basic',
+
+    # torch._dynamo.exc.Unsupported: call_function BuiltinVariable(int) [TensorVariable()] {}
+    'AddIntModule_basic',
+    'AtenIntTensorCharDtypeModule_basic',
+    'BoolIntFalseModule_basic',
+    'BoolIntTrueModule_basic',
+    'DivIntModule_basic',
+    'EqIntModule_basic',
+    'GeIntModule_basic',
+    'GtIntModule_basic',
+    'MulIntModule_basic',
+    'NeIntModule_basic',
+    'SqrtIntModule_basic',
+    'SubIntModule_basic',
+    'TensorToIntZeroRank_basic',
+    'TensorToInt_basic',
+    'UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic',
+    'ViewCollapseDynamicWithAtenSizeIntModule_basic',
+
+    # torch._dynamo.exc.Unsupported: call_method ListVariable() sort [] {'reverse': ConstantVariable(bool)}
+    'SortIntListReverse_basic',
+
+    # torch._dynamo.exc.Unsupported: call_method ListVariable() sort [] {}
+    'SortIntList_basic',
+
+    # torch._dynamo.exc.Unsupported: data dependent operator: aten._local_scalar_dense.default
+    'AtenFloatScalarModule_basic',
+    'AtenIntBoolOpModule_basic',
+    'OneHotModule_basic',
+    'QuantizedMLP_basic',
+    'ScalarImplicitFloatModule_basic',
+    'ScalarImplicitIntModule_basic',
+
+    # torch._dynamo.exc.Unsupported: dynamic shape operator: aten.bincount.default
+    'BincountMinlengthModule_basic',
+    'BincountModule_basic',
+    'BincountStaticSizeModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor bool call_function aten.Bool
+    'BoolFloatConstantModule_basic',
+    'BoolIntConstantModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor bool call_function aten.__contains__
+    'ContainsIntList_False',
+    'ContainsIntList_True',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor bool call_function aten.all
+    'AllBoolFalseModule_basic',
+    'AllBoolTrueModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor bool call_function aten.any
+    'AnyBoolFalseModule_basic',
+    'AnyBoolTrueModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor float call_function aten.sqrt
+    'SqrtIntConstantModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor int call_function aten.Int
+    'AtenIntBoolOpConstFalseModule_basic',
+    'AtenIntBoolOpConstTrueModule_basic',
+    'IntFloatModule_basic',
+    'PowIntFloatModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor int call_function aten.len
+    'LenStrModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor int call_function aten.numel
+    'NumelModule_basic',
+    'NumelZeroRankModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor int call_function prim.max
+    'PrimMaxIntModule_basic',
+
+    # torch._dynamo.exc.Unsupported: torch.* op returned non-Tensor int call_function prim.min
+    'PrimMinIntModule_basic',
+
+    # empty graph
+    'IsFloatingPointFloat_True',
+    'IsFloatingPointInt_False',
+    'TorchPrimLoopForLikeModule_basic',
+    'TorchPrimLoopWhileLikeModule_basic',
 }

+# See https://github.com/llvm/torch-mlir/issues/2050
+TORCHDYNAMO_CRASHING_SET = {
+    "ElementwiseCloneChannelsLastMemoryFormatModule_basic",
+    "ElementwiseCloneContiguousModule_basic",
+    "ElementwiseCloneModule_basic",
+    "ExpandAsFloatModule_basic",
+    "ExpandAsIntModule_basic",
+    "ExpandModule_basic",
+    "MoveDimIntModule_basic",
+    "MoveDimIntNegativeIndexModule_basic",
+    "NarrowVerticalTest2_basic",
+    "NarrowVerticalTest_basic",
+    "NumpyTRank2Module_basic",
+    "NumpyTRankNDynamicModule_basic",
+    "NumpyTRankNStaticModule_basic",
+    "PermuteModule_basic",
+    "PermuteNegativeIndexModule_basic",
+    "SelectIntNegativeDimAndIndexStaticModule_basic",
+    "SliceModule_basic",
+    "SliceNegIdxModule_basic",
+    "SliceOutOfLowerBoundStartIndexModule_basic",
+    "SliceSizeTwoStepModule_basic",
+    "SliceStaticModule_basic",
+    "TModuleRank2_basic",
+    "ToCopyModule_basic",
+    "TransposeIntModule_basic",
+    "TransposeIntNegDimsModule_basic",
+}
+
 STABLEHLO_PASS_SET = {
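Most of the new XFAIL entries fall into a few classes visible in the comments above: builtins like bool/int/float applied to tensors, data-dependent or dynamic-shape operators, and ops returning non-tensor values. A hedged sketch of the dynamic-shape class, using the built-in "eager" debug backend (not the torch-mlir backend):

import torch
import torch._dynamo as dynamo

def g(x):
    # bincount's output length depends on the values in x, so its
    # shape cannot be captured in a single static FX graph.
    return torch.bincount(x)

strict_g = dynamo.optimize("eager", nopython=True)(g)
try:
    strict_g(torch.tensor([1, 2, 2, 5]))
except Exception as e:
    # torch._dynamo.exc.Unsupported: dynamic shape operator: aten.bincount.default
    print(type(e).__name__)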


@@ -65,6 +65,8 @@ def run_pipeline_with_repro_report(module,
{description} failed with the following diagnostics:
{sys.stderr.getvalue()}
python exception: {e}
For Torch-MLIR developers, the error can be reproduced with:
$ torch-mlir-opt -pass-pipeline='{pipeline}' {filename}
Add '{debug_options}' to get the IR dump for debugging purpose.
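The report template now also surfaces the underlying Python exception ({e}), which matters once nopython=True turns graph breaks into real exceptions. A hedged usage sketch of this helper; the "raw" output type and pipeline name reflect the torch-mlir Python API of this era but are assumptions here, not part of the patch:

import torch
import torch_mlir
from torch_mlir.compiler_utils import run_pipeline_with_repro_report

class AddModule(torch.nn.Module):
    def forward(self, x):
        return x + x

# Get an unlowered TorchScript-level module, then run a pipeline on it;
# on failure, run_pipeline_with_repro_report raises with the message
# template shown above, including a torch-mlir-opt repro command.
module = torch_mlir.compile(AddModule(), torch.ones(3), output_type="raw")
run_pipeline_with_repro_report(
    module,
    "builtin.module(torchscript-module-to-torch-backend-pipeline)",
    "Lowering TorchScript IR -> Torch Backend IR",
)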


@@ -2,7 +2,6 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
from typing import List
import numpy
@@ -24,6 +23,7 @@ def _returns_empty_tuple(fx_graph: torch.fx.GraphModule) -> bool:
return False
return True
@make_simple_dynamo_backend
def _refbackend_torchdynamo_backend(fx_graph: torch.fx.GraphModule,
example_inputs: List[torch.Tensor]):
@@ -45,11 +45,8 @@ def _refbackend_torchdynamo_backend(fx_graph: torch.fx.GraphModule,
     # Torch-MLIR does not support returning an empty tuple. The reason is
     # that both returning an empty tuple and returning `None` results in MLIR
     # functions that have as a return type `()`. In other words, there is no
-    # way of differentiating between the two. Moreover, since Torch-MLIR treats
-    # inputs as having value semantics, graphs that return nothing are no-ops to
-    # Torch-MLIR.
-    if _returns_empty_tuple(fx_graph):
-        return fx_graph
+    # way of differentiating between the two.
+    assert not _returns_empty_tuple(fx_graph), "encountered graph that does not return anything"

     mlir_module = torch_mlir.compile(
         fx_graph, example_inputs, output_type="linalg-on-tensors")
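The assert above replaces a silent early return: with nopython=True every captured graph must be compilable, so a graph with no tensor outputs is now treated as an error rather than passed through. A small sketch (toy inspection backend assumed) of what such a graph looks like to the backend:

import torch
import torch._dynamo as dynamo

def inspect_backend(gm: torch.fx.GraphModule, example_inputs):
    output_node = [n for n in gm.graph.nodes if n.op == "output"][0]
    # For a function with no tensor results, Dynamo emits `return ()`,
    # i.e. the output node's args are ((),) -- the shape that
    # _returns_empty_tuple detects.
    print(output_node.args)
    return gm.forward

def mutate_only(x):
    x.add_(1)  # in-place update, nothing returned

dynamo.optimize(inspect_backend)(mutate_only)(torch.ones(3))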
@@ -92,7 +89,8 @@ class TorchDynamoTestConfig(TestConfig):
         result: Trace = []
         for item in trace:
             f = lambda method, *inputs: method(*inputs)
-            dynamo_f = dynamo.optimize(_refbackend_torchdynamo_backend)(f)
+            torch._dynamo.reset()
+            dynamo_f = dynamo.optimize(_refbackend_torchdynamo_backend, nopython=True)(f)
             output = dynamo_f(item_symbol_that_clones_inputs, *item.inputs)
             result.append(
                 TraceItem(symbol=item.symbol,
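The added torch._dynamo.reset() guards against Dynamo's per-code-object caching: the lambda `f` is rebuilt on every loop iteration, but its code object is the same each time, so without a reset a later test item could hit the cache entry compiled for an earlier one. A hedged illustration of the failure mode (exact cache behavior varies across PyTorch versions):

import torch
import torch._dynamo as dynamo

compiles = []

def make_backend(tag):
    def backend(gm, example_inputs):
        compiles.append(tag)  # record which backend actually compiled
        return gm.forward
    return backend

f = lambda x: x * 2
dynamo.optimize(make_backend("first"))(f)(torch.ones(1))
# Same code object: this call may reuse the cached compilation and
# never invoke the "second" backend...
dynamo.optimize(make_backend("second"))(f)(torch.ones(1))
# ...whereas resetting forces a fresh compile:
torch._dynamo.reset()
dynamo.optimize(make_backend("third"))(f)(torch.ones(1))
print(compiles)  # e.g. ['first', 'third']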