mirror of https://github.com/llvm/torch-mlir
update llvm tag to a2620e00. (#1567)

- also update MHLO to 57ba12a2 (branch greencommit/2022-11-07-a2620e00)
- change -pass-pipeline format to make tests pass

Branch: pull/1571/head
Tag: snapshot-20221110.653
parent 64914603fa
commit 4f173c6e0f
@@ -1 +1 @@
-Subproject commit 74fb770de9399d7258a8eda974c93610cfde698e
+Subproject commit a2620e00ffa232a406de3a1d8634beeda86956fd
@@ -1 +1 @@
-Subproject commit 36238f16441cd1a884af988d4400d2ebb0c75bbc
+Subproject commit 57ba12a2a1934c3c9fc3cd1580f28f0c233f41d4
@@ -346,7 +346,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
     option_string = "{backend-legal-ops=" + ",".join(backend_legal_ops) + "}"
     run_pipeline_with_repro_report(
         mb.module,
-        f"torchscript-module-to-torch-backend-pipeline{option_string}",
+        f"builtin.module(torchscript-module-to-torch-backend-pipeline{option_string})",
         "Lowering TorchScript IR -> Torch Backend IR",
     )

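For reference, the wrapped f-string above now yields a fully anchored pipeline text. A small worked example of the string construction (the backend_legal_ops values here are illustrative, not taken from this commit):

backend_legal_ops = ["torch.aten.square", "torch.aten.argmax"]
option_string = "{backend-legal-ops=" + ",".join(backend_legal_ops) + "}"
pipeline = f"builtin.module(torchscript-module-to-torch-backend-pipeline{option_string})"
print(pipeline)
# builtin.module(torchscript-module-to-torch-backend-pipeline{backend-legal-ops=torch.aten.square,torch.aten.argmax})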
@@ -361,7 +361,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
     if output_type == OutputType.TOSA:
         run_pipeline_with_repro_report(
             mb.module,
-            "torch-backend-to-tosa-backend-pipeline",
+            "builtin.module(torch-backend-to-tosa-backend-pipeline)",
             "Lowering Torch Backend IR -> TOSA Backend IR")
         if verbose:
             print("\n====================")
@@ -372,7 +372,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
     if output_type == OutputType.LINALG_ON_TENSORS:
         run_pipeline_with_repro_report(
             mb.module,
-            "torch-backend-to-linalg-on-tensors-backend-pipeline",
+            "builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline)",
             "Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
         if verbose:
             print("\n====================")
@@ -383,7 +383,7 @@ PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
     elif output_type == OutputType.MHLO:
         run_pipeline_with_repro_report(
             mb.module,
-            "torch-backend-to-mhlo-backend-pipeline",
+            "builtin.module(torch-backend-to-mhlo-backend-pipeline)",
             "Lowering Torch Backend IR -> MHLO Backend IR")
         if verbose:
             print("\n====================")
@@ -1261,7 +1261,7 @@ def main(args):
     for function in torch.jit._state._python_cu.get_functions():
         mb.import_function(function)
     # Clean up the IR a bit before writing it out.
-    pm = PassManager.parse("canonicalize", context=mb.module.context)
+    pm = PassManager.parse("builtin.module(canonicalize)", context=mb.module.context)
     pm.run(mb.module)
     # Munge the IR a bit to make it more systematically accessible.
     asm = mb.module.operation.get_asm()
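This hunk shows the core of the change: after the LLVM bump, MLIR's textual pass pipelines must name their anchor operation explicitly, so the bare "canonicalize" string no longer parses. A minimal sketch of the new behavior, assuming the upstream MLIR Python bindings are importable as `mlir` (illustrative, not part of this commit):

from mlir.ir import Context, Module
from mlir.passmanager import PassManager

with Context() as ctx:
    module = Module.parse("module {}")
    # New format: the pipeline string names its top-level anchor op.
    pm = PassManager.parse("builtin.module(canonicalize)", context=ctx)
    pm.run(module)
    # The old bare form, PassManager.parse("canonicalize"), now raises a parse error.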
@@ -67,7 +67,7 @@ class EagerModeRefBackend(TorchMLIREagerBackend):
         if module_hash not in self.module_to_refbackend_invoker:
             run_pipeline_with_repro_report(
                 imported_module,
-                "torch-function-to-torch-backend-pipeline,torch-backend-to-linalg-on-tensors-backend-pipeline",
+                "builtin.module(torch-function-to-torch-backend-pipeline,torch-backend-to-linalg-on-tensors-backend-pipeline)",
                 "EagerMode",
             )
             self.module_to_refbackend_invoker[module_hash] = _ref_backend.load(
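As above, two pipelines can still be sequenced with a comma; only the outer builtin.module(...) anchor is new. A hedged one-liner (upstream `mlir` bindings assumed; canonicalize and cse stand in for the torch-mlir pipelines):

from mlir.ir import Context
from mlir.passmanager import PassManager

with Context():
    # Comma-separated entries run in order under the same anchor.
    PassManager.parse("builtin.module(canonicalize,cse)")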
@@ -114,7 +114,7 @@ class RefBackendInvoker:
         return invoke


-LOWERING_PIPELINE = ",".join([
+LOWERING_PIPELINE = "builtin.module(" + ",".join([
     "func.func(refback-generalize-tensor-pad)",
     # Bufferize.
     "func.func(scf-bufferize)",
@@ -152,7 +152,7 @@ LOWERING_PIPELINE = ",".join([
     "convert-func-to-llvm",
     "convert-cf-to-llvm",
     "reconcile-unrealized-casts",
-])
+]) + ")"


 class RefBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend):
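Together with the previous hunk, this closes the parenthesis opened around the joined list, so the constant evaluates to a single anchored pipeline string. A worked example with the list shortened to a few of the entries shown above:

# Shortened illustration of the LOWERING_PIPELINE construction.
passes = [
    "func.func(refback-generalize-tensor-pad)",
    "func.func(scf-bufferize)",
    "convert-func-to-llvm",
    "reconcile-unrealized-casts",
]
print("builtin.module(" + ",".join(passes) + ")")
# builtin.module(func.func(refback-generalize-tensor-pad),func.func(scf-bufferize),convert-func-to-llvm,reconcile-unrealized-casts)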
@@ -36,7 +36,7 @@ class LinalgOnTensorsMhloBackend(MhloBackend):
         """
         run_pipeline_with_repro_report(
             imported_module,
-            "func.func(symbolic-shape-optimization),func.func(hlo-legalize-to-linalg),func.func(canonicalize)",
+            "builtin.module(func.func(symbolic-shape-optimization),func.func(hlo-legalize-to-linalg),func.func(canonicalize))",
             "Lowering MLIR-HLO to Linalg-on-Tensors")
         return self.refbackend.compile(imported_module)

@@ -43,20 +43,20 @@ class LinalgOnTensorsTosaBackend(TosaBackend):
         # that depend on TOSA as well as TOSA-to-Standard.
         run_pipeline_with_repro_report(
             imported_module,
-            "func.func(tosa-to-arith)",
+            "builtin.module(func.func(tosa-to-arith))",
             "Lowering TOSA to Arith")

         # Named ops must be legalized prior to general tosa-to-linalg
         run_pipeline_with_repro_report(
             imported_module,
-            "func.func(tosa-to-linalg-named)",
+            "builtin.module(func.func(tosa-to-linalg-named))",
             "Lowering TOSA to Linalg-on-Tensors for Named Ops")

         # TOSA-to-LinAlg may generate tosa.const() ops, so we want to lower them
         # to arith.constants here before proceeding further.
         run_pipeline_with_repro_report(
             imported_module,
-            "func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith)",
+            "builtin.module(func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith))",
             "Lowering TOSA to Linalg-on-Tensors")

         return self.refbackend.compile(imported_module)
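Note that per-function passes stay nested under func.func, which in turn nests under the explicit builtin.module anchor. A minimal parse check, again assuming the upstream `mlir` bindings (canonicalize stands in for the TOSA passes above):

from mlir.ir import Context
from mlir.passmanager import PassManager

with Context():
    # Runs the nested pass on every func.func inside the module.
    PassManager.parse("builtin.module(func.func(canonicalize))")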
@@ -1,4 +1,4 @@
-// RUN: torch-mlir-opt -pass-pipeline='torch-function-to-torch-backend-pipeline{backend-legal-ops=torch.aten.square,torch.aten.argmax}' -split-input-file %s | FileCheck %s
+// RUN: torch-mlir-opt -pass-pipeline='builtin.module(torch-function-to-torch-backend-pipeline{backend-legal-ops=torch.aten.square,torch.aten.argmax})' -split-input-file %s | FileCheck %s

 // CHECK-LABEL: func.func @torch.aten.square
 func.func @torch.aten.square(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> {
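The anchored syntax applies to the -pass-pipeline flag of torch-mlir-opt as well, as the updated RUN line shows. A hedged sketch of invoking it from Python (assumes a built torch-mlir-opt on PATH and an input file named test.mlir):

import subprocess

pipeline = ("builtin.module(torch-function-to-torch-backend-pipeline"
            "{backend-legal-ops=torch.aten.square,torch.aten.argmax})")
subprocess.run(
    ["torch-mlir-opt", f"-pass-pipeline={pipeline}", "-split-input-file", "test.mlir"],
    check=True,
)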