From 6e84752c395a828eb612b21be4ab26d9f7e60b22 Mon Sep 17 00:00:00 2001
From: Vivek Khandelwal
Date: Thu, 7 Mar 2024 21:42:38 +0530
Subject: [PATCH] build: manually update PyTorch version (#2992)

Set PyTorch and TorchVision version to nightly release 2024-03-07.
This commit also removes the deprecated constraints API:
https://github.com/pytorch/pytorch/commit/342e7929b804ec56121e82e92d6a199b549c38b1

Signed-Off By: Vivek Khandelwal
---
 projects/pt1/e2e_testing/xfail_sets.py | 7 ++++++-
 python/torch_mlir/fx.py                | 3 +--
 pytorch-hash.txt                       | 2 +-
 pytorch-requirements.txt               | 2 +-
 test/python/fx_importer/sparse_test.py | 2 +-
 torchvision-requirements.txt           | 2 +-
 6 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index 222911818..ee0b3608c 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -2167,7 +2167,6 @@ ONNX_XFAIL_SET = {
     "ElementwiseTanIntModule_basic",
     "ElementwiseUnaryIntModule_basic",
     "ElementwiseUnsqueezeNegDimsModule_basic",
-    "ElementwiseWhereScalarModule_basic",
     "EmbeddingModuleF16_basic",
     "EmbeddingModuleI32_basic",
     "EmbeddingModuleI64_basic",
@@ -2192,5 +2191,11 @@ ONNX_XFAIL_SET = {
     "TensorsStackPromoteDTypeModule_basic",
 }
 
+if torch_version_for_comparison() < version.parse("2.3.0.dev"):
+    ONNX_XFAIL_SET = ONNX_XFAIL_SET | {
+        # ERROR: dtype (torch.float64) is not equal to golden dtype (torch.float32)
+        "ElementwiseWhereScalarModule_basic",
+    }
+
 ONNX_CRASHING_SET = {
 }
diff --git a/python/torch_mlir/fx.py b/python/torch_mlir/fx.py
index 76cd91f82..3622efafd 100644
--- a/python/torch_mlir/fx.py
+++ b/python/torch_mlir/fx.py
@@ -20,7 +20,6 @@ def export_and_import(
     f,
     *args,
     fx_importer: Optional[FxImporter] = None,
-    constraints: Optional[torch.export.Constraint] = None,
     experimental_support_mutation: bool = False,
     hooks: Optional[FxImporterHooks] = None,
     func_name: str = "main",
@@ -31,7 +30,7 @@ def export_and_import(
     if fx_importer is None:
         fx_importer = FxImporter(context=context, hooks=hooks)
 
-    prog = torch.export.export(f, args, kwargs, constraints=constraints)
+    prog = torch.export.export(f, args, kwargs)
     decomp_table = get_decomposition_table()
     prog = prog.run_decompositions(decomp_table)
     if experimental_support_mutation:
diff --git a/pytorch-hash.txt b/pytorch-hash.txt
index 81f0390b4..a5e23f46e 100644
--- a/pytorch-hash.txt
+++ b/pytorch-hash.txt
@@ -1 +1 @@
-8efa066dc0870521652c1319bd6b5b0f6dc3fe25
+ce013333221ff2d1285a8e8cf7c427584e65fea2
diff --git a/pytorch-requirements.txt b/pytorch-requirements.txt
index 26abce08d..e1bb61745 100644
--- a/pytorch-requirements.txt
+++ b/pytorch-requirements.txt
@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
 --pre
-torch==2.3.0.dev20240220
+torch==2.3.0.dev20240307
diff --git a/test/python/fx_importer/sparse_test.py b/test/python/fx_importer/sparse_test.py
index 6d801a1d8..6260a5bba 100644
--- a/test/python/fx_importer/sparse_test.py
+++ b/test/python/fx_importer/sparse_test.py
@@ -104,7 +104,7 @@ def sparse_export(
     mask = [a.layout in SPARSE_LAYOUTS for a in args]
     # Build the regular FX traced graph with only dense arguments
     # (the current version would crash otherwise, see issue above).
-    prog = torch.export.export(f, dargs, kwargs, constraints=None)
+    prog = torch.export.export(f, dargs, kwargs)
     # Annotate sparse arguments in the graph. Note that we currently
     # only account for sparsity defined by the user inputs to the model.
     # TODO: support sparsity in model parameters (weights, biases)
diff --git a/torchvision-requirements.txt b/torchvision-requirements.txt
index ce099fb91..a0b4c6fe6 100644
--- a/torchvision-requirements.txt
+++ b/torchvision-requirements.txt
@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
 --pre
-torchvision==0.18.0.dev20240220
+torchvision==0.18.0.dev20240307
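
With the deprecated constraints= argument removed, dynamic dimensions are
expressed through torch.export.export's dynamic_shapes argument instead. A
minimal sketch, assuming the torch==2.3.0.dev nightly pinned above; the
module, the input shape, and the symbolic "batch" dimension are illustrative:

    import torch
    from torch.export import Dim, export

    class AddOne(torch.nn.Module):
        def forward(self, x):
            return x + 1.0

    # Mark dim 0 of the input "x" as a symbolic batch dimension; this is
    # the replacement for the removed constraints/dynamic_dim machinery.
    batch = Dim("batch")
    prog = export(
        AddOne(),
        (torch.randn(4, 3),),
        dynamic_shapes={"x": {0: batch}},
    )
    print(prog.graph_module.graph)

Callers of torch_mlir's export_and_import that previously forwarded a
constraints= keyword would migrate the same way, since it now passes f, args,
and kwargs straight through to torch.export.export.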