mirror of https://github.com/llvm/torch-mlir
Update llvm-project to 204d301bb1921431a853c0bfba32007c018df1d5
This brings in the fix for the obscure RefBackend bug we were hitting.

branch: pull/338/head
parent: b59f2cb673
commit: 8b2c099914
@@ -15,18 +15,14 @@ llvm_project_dir="$project_dir/external/llvm-project"
 build_dir="$project_dir/build"
 
 cmake -GNinja -B"$build_dir" "$llvm_project_dir/llvm" \
-  -DCMAKE_BUILD_TYPE=RelWithDebInfo \
-  -DCMAKE_C_FLAGS_RELWITHDEBINFO="-O2 -DNDEBUG -gline-tables-only" \
-  -DCMAKE_CXX_FLAGS_RELWITHDEBINFO="-O2 -DNDEBUG -gline-tables-only" \
+  -DCMAKE_BUILD_TYPE=Release \
   -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
   -DLLVM_ENABLE_PROJECTS=mlir \
   -DLLVM_EXTERNAL_PROJECTS=torch-mlir \
   -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$project_dir" \
   -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
+  -DLLVM_ENABLE_ASSERTIONS=ON \
   -DLLVM_TARGETS_TO_BUILD=host
-  #-DLLVM_ENABLE_ASSERTIONS=ON \
-  #
 
 cd "$build_dir"
 ninja tools/torch-mlir/all check-torch-mlir-all
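The net effect of this hunk: the standalone build switches from RelWithDebInfo (with its custom -gline-tables-only flags) to a plain Release build, and -DLLVM_ENABLE_ASSERTIONS=ON replaces the previously commented-out flag, so MLIR invariant violations now abort loudly instead of passing silently. As a minimal sketch only, the same configure/build step driven from Python, e.g. for a CI wrapper; the use of subprocess and the assumed checkout layout are illustrative, not part of this commit:

# Sketch only: mirrors the flags in the hunk above. Paths are assumptions.
import subprocess
from pathlib import Path

project_dir = Path(".").resolve()                      # assumed torch-mlir checkout
llvm_dir = project_dir / "external" / "llvm-project" / "llvm"
build_dir = project_dir / "build"

subprocess.run([
    "cmake", "-GNinja", f"-B{build_dir}", str(llvm_dir),
    "-DCMAKE_BUILD_TYPE=Release",
    "-DCMAKE_C_COMPILER_LAUNCHER=ccache",
    "-DCMAKE_CXX_COMPILER_LAUNCHER=ccache",
    "-DLLVM_ENABLE_PROJECTS=mlir",
    "-DLLVM_EXTERNAL_PROJECTS=torch-mlir",
    f"-DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR={project_dir}",
    "-DMLIR_ENABLE_BINDINGS_PYTHON=ON",
    "-DLLVM_ENABLE_ASSERTIONS=ON",
    "-DLLVM_TARGETS_TO_BUILD=host",
], check=True)
subprocess.run(["ninja", "tools/torch-mlir/all", "check-torch-mlir-all"],
               cwd=build_dir, check=True)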
@@ -30,12 +30,6 @@ def MmModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(4, 4), tu.rand(4, 4))
 
-# TODO: Investigate why RefBackend sometimes can't handle two calls in a row in
-# the trace.
-# It actually works, if MmModule_chained is run by itself, but if other tests
-# are mixed with it, it fails with a mysterious-sounding low level ctypes error
-# that exceeds my current ability to debug.
-#
 @register_test_case(module_factory=lambda: MmModule())
 def MmModule_chained(module, tu: TestUtils):
     res = module.forward(tu.rand(4, 4), tu.rand(4, 4))
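For readers without the harness open, a hedged sketch of the module these test cases exercise; the class body is not part of this diff, and the annotation imports follow the e2e suite's conventions of this era (treat the exact import path as an assumption):

# Sketch only: MmModule as it conventionally appears in the e2e test suite.
import torch
# Assumed import path; adjust if the annotations module has moved.
from torch_mlir_e2e_test.torchscript.annotations import annotate_args, export

class MmModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,                              # the module itself
        ([-1, -1], torch.float32, True),   # lhs: dynamically shaped 2D float tensor
        ([-1, -1], torch.float32, True),   # rhs: dynamically shaped 2D float tensor
    ])
    def forward(self, lhs, rhs):
        return torch.mm(lhs, rhs)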
@@ -18,11 +18,7 @@ _common_torch_mlir_lowering_xfails = {
     'QuantizedMLP_basic',
 }
 
-XFAIL_SETS['refbackend'] = _common_torch_mlir_lowering_xfails | {
-    # The first test in the e2e test batch would fail with SystemError: null
-    # argument to internal routine. Might be some issue with refbackend.
-    'MmModule_basic',
-}
+XFAIL_SETS['refbackend'] = _common_torch_mlir_lowering_xfails
 
 XFAIL_SETS['torchscript'] = {}
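XFAIL_SETS maps a config name to the set of test names the runner expects to fail, so dropping MmModule_basic here means it must now pass under refbackend. A minimal sketch of how such a set is typically consumed (illustrative only, not the actual torch_mlir_e2e_test reporting code):

# Illustrative runner logic: expected failures are inverted, so a test in the
# xfail set only raises a red flag if it unexpectedly passes.
def report(results, xfail_set):
    # results: test name -> bool (True means the test passed)
    for name, passed in results.items():
        if name in xfail_set:
            print(("UNEXPECTED PASS " if passed else "XFAIL ") + name)
        else:
            print(("PASS " if passed else "FAIL ") + name)

report({'MmModule_basic': True, 'QuantizedMLP_basic': False},
       xfail_set={'QuantizedMLP_basic'})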
@@ -0,0 +1,57 @@
+# -*- Python -*-
+# This file is licensed under a pytorch-style license
+# See frontends/pytorch/LICENSE for license information.
+
+# From the torch-mlir root, run with:
+# `python -m examples.torchfx.examples.example_add_tanh_sigmoid`
+# (after setting up python environment with write_env_file.sh)
+
+import torch
+from torch.fx.experimental.fx_acc import acc_tracer
+import torch_mlir
+from torch_mlir.dialects.torch import register_dialect
+from torch_mlir.passmanager import PassManager
+from torch_mlir_e2e_test.linalg_on_tensors_backends.refbackend import RefBackendLinalgOnTensorsBackend
+
+from ..builder import build_module
+from ..annotator import annotate_forward_args
+from ..torch_mlir_types import TorchTensorType
+
+
+class MyModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, y):
+        return torch.tanh(x) + torch.sigmoid(y)
+
+
+module = MyModule()
+traced_module = acc_tracer.trace(module, [torch.Tensor(2,2),
+                                          torch.Tensor(2,2)])
+
+print("TRACE")
+arg_type = TorchTensorType(shape=[None, None], dtype=torch.float)
+traced_module = annotate_forward_args(traced_module, [arg_type, arg_type])
+print(traced_module.graph)
+torch_mlir_module = build_module(traced_module)
+
+print("\n\nTORCH MLIR")
+torch_mlir_module.dump()
+print(torch_mlir_module.operation.verify())
+
+with torch_mlir_module.context:
+    pm = PassManager.parse('torchscript-to-linalg-on-tensors-backend-pipeline')
+    pm.run(torch_mlir_module)
+
+print("\n\nLOWERED MLIR")
+torch_mlir_module.dump()
+
+backend = RefBackendLinalgOnTensorsBackend()
+compiled = backend.compile(torch_mlir_module)
+jit_module = backend.load(compiled)
+
+print("\n\nRunning Forward Function")
+t = torch.rand((2, 2), dtype=torch.float)
+print("Compiled result:\n", jit_module.forward(t.numpy(), t.numpy()))
+print("\nExpected result:\n", module.forward(t, t))
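The new example only prints the compiled and eager results side by side. If you want it to verify instead of relying on eyeballing, one possible extension (not in the commit; the tolerance is an assumption) is:

# Possible extension: assert the compiled and eager results agree numerically.
import numpy as np
expected = module.forward(t, t).numpy()
actual = jit_module.forward(t.numpy(), t.numpy())
assert np.allclose(actual, expected, atol=1e-6), "compiled result diverged"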
@@ -1 +1 @@
-Subproject commit 6e60bb6883178cf14e6fd47a6789495636e4322f
+Subproject commit 204d301bb1921431a853c0bfba32007c018df1d5
@@ -186,8 +186,8 @@ static Value getPaddedTensor(Operation *op, OpBuilder &b, Value &input,
   Type ranked4DTensorType = linalg::PadTensorOp::inferResultType(
       input.getType().cast<RankedTensorType>(), paddingInts, paddingInts);
   Value paddedInput = linalg::PadTensorOp::createPadScalarOp(
-      ranked4DTensorType, input, c0, /*low=*/paddings, /*high=*/paddings, loc,
-      b);
+      ranked4DTensorType, input, c0, /*low=*/paddings, /*high=*/paddings,
+      /*packing=*/false, loc, b);
   return paddedInput;
 }
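The only change here is threading the new packing argument through linalg::PadTensorOp::createPadScalarOp, whose signature changed in the bumped llvm-project revision. Semantically the helper still pads the 4D input with the c0 scalar on both the low and high sides of each padded dimension; in rough PyTorch terms (a sketch only; the real code builds linalg.pad_tensor IR and never calls PyTorch):

# Rough PyTorch analogue of getPaddedTensor, padding the last two dims by
# p and q on both sides; pad_value plays the role of the c0 scalar.
import torch
import torch.nn.functional as F

def get_padded_tensor(x: torch.Tensor, p: int, q: int, pad_value: float = 0.0):
    # F.pad orders pads last-dim-first: (w_low, w_high, h_low, h_high).
    return F.pad(x, (q, q, p, p), mode="constant", value=pad_value)

print(get_padded_tensor(torch.zeros(1, 1, 2, 2), p=1, q=1).shape)  # (1, 1, 4, 4)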