diff --git a/CMakeLists.txt b/CMakeLists.txt index b309e85cc..181cf8b8d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -47,8 +47,14 @@ endif() option(TORCH_MLIR_OUT_OF_TREE_BUILD "Specifies an out of tree build" OFF) # PyTorch native extension gate. If OFF, then no features which depend on -# native extensions will be built. -option(TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS "Enables PyTorch native extension features" ON) +# native extensions will be built. TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS is disabled by default. +# But it will be manually enabled in CI builds to enable the jit_ir_importer.build_tools.torch_ods_gen +# and abstract_interp_lib_gen.py. Once a pure Python version of the build_tools is finished, it will no longer need to be set in CI. +option(TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS "Enables PyTorch native extension features" OFF) +# NOTE: The JIT_IR_IMPORTER paths have become unsupportable due to age and lack of maintainers. +# Turning this off disables the old TorchScript path, leaving FX-based import as the current supported option. +# The option will be retained for a time, and if a maintainer is interested in setting up testing for it, +# please reach out on the list and speak up for it. It will only be enabled in CI for test usage. cmake_dependent_option(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER "Enables JIT IR Importer" ON TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF) cmake_dependent_option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" OFF TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF) diff --git a/README.md b/README.md index 8d8c6ad8d..56371b949 100644 --- a/README.md +++ b/README.md @@ -21,17 +21,8 @@ Several vendors have adopted MLIR as the middle layer in their systems, enabling ## All the roads from PyTorch to Torch MLIR Dialect We have few paths to lower down to the Torch MLIR Dialect. - -![Simplified Architecture Diagram for README](docs/images/readme_architecture_diagram.png) - - - TorchScript - This is the most tested path down to Torch MLIR Dialect. 
- - LazyTensorCore - Read more details [here](docs/ltc_backend.md). - - We also have basic TorchDynamo/PyTorch 2.0 support, see our - [long-term roadmap](docs/roadmap.md) and - [Thoughts on PyTorch 2.0](https://discourse.llvm.org/t/thoughts-on-pytorch-2-0/67000/3) - for more details. + - ONNX as the entry point. + - FX as the entry point. ## Project Communication @@ -39,17 +30,6 @@ We have few paths to lower down to the Torch MLIR Dialect. - Github issues [here](https://github.com/llvm/torch-mlir/issues) - [`torch-mlir` section](https://llvm.discourse.group/c/projects-that-want-to-become-official-llvm-projects/torch-mlir/41) of LLVM Discourse -### Meetings - -Community Meeting / Developer Hour: -- 1st and 3rd Monday of the month at 9 am PST -- 2nd and 4th Monday of the month at 5 pm PST - -Office Hours: -- Every Thursday at 8:30 am PST - -Meeting links can be found [here](https://discourse.llvm.org/t/new-community-meeting-developer-hour-schedule/73868). - ## Install torch-mlir snapshot At the time of writing, we release [pre-built snapshots of torch-mlir](https://github.com/llvm/torch-mlir-release) for Python 3.11 and Python 3.10. @@ -74,7 +54,14 @@ pip install --pre torch-mlir torchvision \ -f https://github.com/llvm/torch-mlir-release/releases/expanded_assets/dev-wheels ``` -## Demos +## Using torch-mlir + +Torch-MLIR is primarily a project that is integrated into compilers to bridge them to PyTorch and ONNX. 
If contemplating a new integration, it may be helpful to refer to existing downstreams: + +* [IREE](https://github.com/iree-org/iree.git) +* [Blade](https://github.com/alibaba/BladeDISC) + +While most of the project is exercised via testing paths, there are some ways that an end user can directly use the APIs without further integration: ### FxImporter ResNet18 ```shell @@ -93,30 +80,6 @@ torch-mlir prediction [('Labrador retriever', 70.6567153930664), ('golden retriever', 4.988325119018555), ('Saluki, gazelle hound', 4.477458477020264)] ``` -### TorchScript ResNet18 - -Standalone script to Convert a PyTorch ResNet18 model to MLIR and run it on the CPU Backend: - -```shell -# Get the latest example if you haven't checked out the code -wget https://raw.githubusercontent.com/llvm/torch-mlir/main/projects/pt1/examples/torchscript_resnet18.py - -# Run ResNet18 as a standalone script. -python projects/pt1/examples/torchscript_resnet18.py - -load image from https://upload.wikimedia.org/wikipedia/commons/2/26/YellowLabradorLooking_new.jpg -Downloading: "https://download.pytorch.org/models/resnet18-f37072fd.pth" to /home/mlir/.cache/torch/hub/checkpoints/resnet18-f37072fd.pth -100.0% -PyTorch prediction -[('Labrador retriever', 70.66319274902344), ('golden retriever', 4.956596374511719), ('Chesapeake Bay retriever', 4.195662975311279)] -torch-mlir prediction -[('Labrador retriever', 70.66320037841797), ('golden retriever', 4.956601619720459), ('Chesapeake Bay retriever', 4.195651531219482)] -``` - -### Lazy Tensor Core - -View examples [here](docs/ltc_examples.md). 
- ## Repository Layout The project follows the conventions of typical MLIR-based projects: diff --git a/build_tools/ci/build_posix.sh b/build_tools/ci/build_posix.sh index fec5e252e..ea3e570c8 100755 --- a/build_tools/ci/build_posix.sh +++ b/build_tools/ci/build_posix.sh @@ -50,7 +50,8 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \ -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$repo_root" \ -DLLVM_TARGETS_TO_BUILD=host \ -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ - -DTORCH_MLIR_ENABLE_LTC=ON + -DTORCH_MLIR_ENABLE_LTC=ON \ + -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON echo "::endgroup::" echo "::group::Build" diff --git a/build_tools/ci/test_posix.sh b/build_tools/ci/test_posix.sh index accdc4199..3a9f5b7af 100755 --- a/build_tools/ci/test_posix.sh +++ b/build_tools/ci/test_posix.sh @@ -8,22 +8,6 @@ torch_version="${1:-unknown}" export PYTHONPATH="$repo_root/build/tools/torch-mlir/python_packages/torch_mlir:$repo_root/projects/pt1" -echo "::group::Run Linalg e2e integration tests" -python -m e2e_testing.main --config=linalg -v -echo "::endgroup::" - -echo "::group::Run make_fx + TOSA e2e integration tests" -python -m e2e_testing.main --config=make_fx_tosa -v -echo "::endgroup::" - -echo "::group::Run TOSA e2e integration tests" -python -m e2e_testing.main --config=tosa -v -echo "::endgroup::" - -echo "::group::Run Stablehlo e2e integration tests" -python -m e2e_testing.main --config=stablehlo -v -echo "::endgroup::" - echo "::group::Run ONNX e2e integration tests" python -m e2e_testing.main --config=onnx -v echo "::endgroup::" diff --git a/build_tools/update_abstract_interp_lib.sh b/build_tools/update_abstract_interp_lib.sh index cb44a4e8b..070fa54a5 100755 --- a/build_tools/update_abstract_interp_lib.sh +++ b/build_tools/update_abstract_interp_lib.sh @@ -41,6 +41,9 @@ if [ ! 
-z ${TORCH_MLIR_EXT_MODULES} ]; then ext_module="${TORCH_MLIR_EXT_MODULES} " fi +# To enable this Python package, manually build torch_mlir with: +# -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON (this enables TORCH_MLIR_ENABLE_JIT_IR_IMPORTER by default) +# TODO: move this package out of JIT_IR_IMPORTER. PYTHONPATH="${pypath}" python \ -m torch_mlir.jit_ir_importer.build_tools.abstract_interp_lib_gen \ --pytorch_op_extensions=${ext_module:-""} \ diff --git a/build_tools/update_torch_ods.sh b/build_tools/update_torch_ods.sh index cb0599f16..e3aa23078 100755 --- a/build_tools/update_torch_ods.sh +++ b/build_tools/update_torch_ods.sh @@ -42,6 +42,9 @@ if [ ! -z ${TORCH_MLIR_EXT_MODULES} ]; then fi set +u +# To enable this Python package, manually build torch_mlir with: +# -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON (this enables TORCH_MLIR_ENABLE_JIT_IR_IMPORTER by default) +# TODO: move this package out of JIT_IR_IMPORTER. PYTHONPATH="${PYTHONPATH}:${pypath}" python \ -m torch_mlir.jit_ir_importer.build_tools.torch_ods_gen \ --torch_ir_include_dir="${torch_ir_include_dir}" \ diff --git a/docs/add_ops.md b/docs/add_ops.md index b8e5ce37e..3a73b48e8 100644 --- a/docs/add_ops.md +++ b/docs/add_ops.md @@ -38,7 +38,8 @@ PS: IREE is pronounced Eerie, and hence the ghost icon. ### How to TorchToLinalg -You will need to do 4 things: +You will need to do 5 things: +- make sure -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON is added during build. This is to enable the Python files used in `build_tools/update_torch_ods.sh` and `build_tools/update_abstract_interp_lib.sh`. - make sure the op exists in `torch_ods_gen.py`, and then run `build_tools/update_torch_ods.sh`, and then build. This generates `GeneratedTorchOps.td`, which is used to generate the cpp and h files where ops function signatures are defined. - Reference [torch op registry](https://github.com/pytorch/pytorch/blob/7451dd058564b5398af79bfc1e2669d75f9ecfa2/torch/csrc/jit/passes/utils/op_registry.cpp#L21) - make sure the op exists in `abstract_interp_lib_gen.py`, and then run `build_tools/update_abstract_interp_lib.sh`, and then build. 
This generates `AbstractInterpLib.cpp`, which contains the abstract interpretation (shape and dtype) functions for the ops.