update llvm tag to a2620e00. (#1567)

- also update MHLO to 57ba12a2 (branch greencommit/2022-11-07-a2620e00)
- change -pass-pipeline format to make tests pass.
pull/1571/head snapshot-20221110.653
Xiafei Qiu 2022-11-10 18:39:28 +08:00 committed by GitHub
parent 64914603fa
commit 4f173c6e0f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 15 additions and 15 deletions

@@ -1 +1 @@
Subproject commit 74fb770de9399d7258a8eda974c93610cfde698e
Subproject commit a2620e00ffa232a406de3a1d8634beeda86956fd

2
externals/mlir-hlo vendored

@@ -1 +1 @@
Subproject commit 36238f16441cd1a884af988d4400d2ebb0c75bbc
Subproject commit 57ba12a2a1934c3c9fc3cd1580f28f0c233f41d4

View File

@@ -346,7 +346,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
option_string = "{backend-legal-ops=" + ",".join(backend_legal_ops) + "}"
run_pipeline_with_repro_report(
mb.module,
f"torchscript-module-to-torch-backend-pipeline{option_string}",
f"builtin.module(torchscript-module-to-torch-backend-pipeline{option_string})",
"Lowering TorchScript IR -> Torch Backend IR",
)
@@ -361,7 +361,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
if output_type == OutputType.TOSA:
run_pipeline_with_repro_report(
mb.module,
"torch-backend-to-tosa-backend-pipeline",
"builtin.module(torch-backend-to-tosa-backend-pipeline)",
"Lowering Torch Backend IR -> TOSA Backend IR")
if verbose:
print("\n====================")
@@ -372,7 +372,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
if output_type == OutputType.LINALG_ON_TENSORS:
run_pipeline_with_repro_report(
mb.module,
"torch-backend-to-linalg-on-tensors-backend-pipeline",
"builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline)",
"Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
if verbose:
print("\n====================")
@@ -383,7 +383,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
elif output_type == OutputType.MHLO:
run_pipeline_with_repro_report(
mb.module,
"torch-backend-to-mhlo-backend-pipeline",
"builtin.module(torch-backend-to-mhlo-backend-pipeline)",
"Lowering Torch Backend IR -> MHLO Backend IR")
if verbose:
print("\n====================")

View File

@@ -1261,7 +1261,7 @@ def main(args):
for function in torch.jit._state._python_cu.get_functions():
mb.import_function(function)
# Clean up the IR a bit before writing it out.
pm = PassManager.parse("canonicalize", context=mb.module.context)
pm = PassManager.parse("builtin.module(canonicalize)", context=mb.module.context)
pm.run(mb.module)
# Munge the IR a bit to make it more systematically accessible.
asm = mb.module.operation.get_asm()

View File

@@ -67,7 +67,7 @@ class EagerModeRefBackend(TorchMLIREagerBackend):
if module_hash not in self.module_to_refbackend_invoker:
run_pipeline_with_repro_report(
imported_module,
"torch-function-to-torch-backend-pipeline,torch-backend-to-linalg-on-tensors-backend-pipeline",
"builtin.module(torch-function-to-torch-backend-pipeline,torch-backend-to-linalg-on-tensors-backend-pipeline)",
"EagerMode",
)
self.module_to_refbackend_invoker[module_hash] = _ref_backend.load(

View File

@@ -114,7 +114,7 @@ class RefBackendInvoker:
return invoke
LOWERING_PIPELINE = ",".join([
LOWERING_PIPELINE = "builtin.module(" + ",".join([
"func.func(refback-generalize-tensor-pad)",
# Bufferize.
"func.func(scf-bufferize)",
@@ -152,7 +152,7 @@ LOWERING_PIPELINE = ",".join([
"convert-func-to-llvm",
"convert-cf-to-llvm",
"reconcile-unrealized-casts",
])
]) + ")"
class RefBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend):

View File

@@ -36,7 +36,7 @@ class LinalgOnTensorsMhloBackend(MhloBackend):
"""
run_pipeline_with_repro_report(
imported_module,
"func.func(symbolic-shape-optimization),func.func(hlo-legalize-to-linalg),func.func(canonicalize)",
"builtin.module(func.func(symbolic-shape-optimization),func.func(hlo-legalize-to-linalg),func.func(canonicalize))",
"Lowering MLIR-HLO to Linalg-on-Tensors")
return self.refbackend.compile(imported_module)

View File

@@ -43,20 +43,20 @@ class LinalgOnTensorsTosaBackend(TosaBackend):
# that depend on TOSA as well as TOSA-to-Standard.
run_pipeline_with_repro_report(
imported_module,
"func.func(tosa-to-arith)",
"builtin.module(func.func(tosa-to-arith))",
"Lowering TOSA to Arith")
# Named ops must be legalized prior to general tosa-to-linalg
run_pipeline_with_repro_report(
imported_module,
"func.func(tosa-to-linalg-named)",
"builtin.module(func.func(tosa-to-linalg-named))",
"Lowering TOSA to Linalg-on-Tensors for Named Ops")
# TOSA-to-LinAlg may generate tosa.const() ops, so we want to lower them
# to arith.constants here before proceeding further.
run_pipeline_with_repro_report(
imported_module,
"func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith)",
"builtin.module(func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith))",
"Lowering TOSA to Linalg-on-Tensors")
return self.refbackend.compile(imported_module)

View File

@@ -1,4 +1,4 @@
// RUN: torch-mlir-opt -pass-pipeline='torch-function-to-torch-backend-pipeline{backend-legal-ops=torch.aten.square,torch.aten.argmax}' -split-input-file %s | FileCheck %s
// RUN: torch-mlir-opt -pass-pipeline='builtin.module(torch-function-to-torch-backend-pipeline{backend-legal-ops=torch.aten.square,torch.aten.argmax})' -split-input-file %s | FileCheck %s
// CHECK-LABEL: func.func @torch.aten.square
func.func @torch.aten.square(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> {