diff --git a/.github/workflows/buildAndTest.yml b/.github/workflows/buildAndTest.yml index 0fea9ed46..7a6a3dc80 100644 --- a/.github/workflows/buildAndTest.yml +++ b/.github/workflows/buildAndTest.yml @@ -43,8 +43,7 @@ jobs: -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \ -DPython3_EXECUTABLE=$(which python) \ -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_TARGETS_TO_BUILD=host \ - -DNPCOMP_ENABLE_PYTORCH=ON + -DLLVM_TARGETS_TO_BUILD=host ninja ninja check-npcomp check-frontends-pytorch - name: Refbackend integration tests diff --git a/CMakeLists.txt b/CMakeLists.txt index 56752df66..95dfac50b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,6 @@ set(NPCOMP_MINIMUM_PYTHON_VERSION 3.6) option(NPCOMP_ENABLE_IREE "Enables the IREE backend (must configure location via IREE_DIR)." OFF) option(NPCOMP_ENABLE_REFJIT "Enables the reference JIT backend." ON) set(NPCOMP_IREE_BUILDDIR "../iree-build" CACHE STRING "If building IREE, then setting this elects to build from a source directory (versus installed package)") -option(NPCOMP_ENABLE_PYTORCH "Enables PyTorch integration." OFF) # Turn on -gsplit-dwarf if requested in debug builds. if (NPCOMP_USE_SPLIT_DWARF AND @@ -50,6 +49,15 @@ if(MSVC) ) endif() +#------------------------------------------------------------------------------- +# Directory setup +#------------------------------------------------------------------------------- + +set(MLIR_NPCOMP_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +set(MLIR_NPCOMP_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) +set(MLIR_NPCOMP_PYTHON_PACKAGES_DIR ${CMAKE_CURRENT_BINARY_DIR}/python_packages) +set(TORCH_MLIR_PYTHON_PACKAGES_DIR "${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}") + if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) # If we are not building as a part of LLVM, build NPCOMP as a standalone # project, including LLVM as a subdirectory. This gives us the most control @@ -235,17 +243,9 @@ if(NPCOMP_ENABLE_IREE) symlink_iree(bindings/python/pyiree/rt python/pyiree/rt) endif() -#------------------------------------------------------------------------------- -# Directory setup -#------------------------------------------------------------------------------- - -set(MLIR_NPCOMP_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -set(MLIR_NPCOMP_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) -set(MLIR_NPCOMP_PYTHON_PACKAGES_DIR ${CMAKE_CURRENT_BINARY_DIR}/python_packages) - add_custom_target(check-npcomp) add_custom_target(check-npcomp-all) -add_dependencies(check-npcomp-all check-npcomp) +add_dependencies(check-npcomp-all check-npcomp check-frontends-pytorch check-torch-mlir check-torch-mlir-plugin) add_subdirectory(include/npcomp) add_subdirectory(lib) @@ -253,7 +253,5 @@ add_subdirectory(python) add_subdirectory(test) add_subdirectory(tools) -if(NPCOMP_ENABLE_PYTORCH) - message(STATUS "Adding PyTorch frontent support...") - add_subdirectory(frontends/pytorch) -endif() +message(STATUS "Adding PyTorch frontend support...") +add_subdirectory(frontends/pytorch) diff --git a/README.md b/README.md index 6a9786fa9..d65273c8c 100644 --- a/README.md +++ b/README.md @@ -147,7 +147,7 @@ ninja check-npcomp # enable the PyTorch frontend. pip3 install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html -cmake -DNPCOMP_ENABLE_PYTORCH=ON ... +cmake ... ninja check-frontends-pytorch # If building with PyTorch ``` @@ -177,7 +177,7 @@ Build/test npcomp (from within docker image): ```shell # From within the docker image. 
cd /src/mlir-npcomp -cmake -GNinja -B/build/npcomp -DCMAKE_BUILD_TYPE=Release -DNPCOMP_ENABLE_PYTORCH=ON . +cmake -GNinja -B/build/npcomp -DCMAKE_BUILD_TYPE=Release . cmake --build /build/npcomp --target check-npcomp check-frontends-pytorch ``` diff --git a/build_tools/update_torch_ods.sh b/build_tools/update_torch_ods.sh deleted file mode 100755 index e0c1dfcf4..000000000 --- a/build_tools/update_torch_ods.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# Updates auto-generated ODS files for the `torch` dialect. -set -e - -src_dir="$(realpath $(dirname $0)/..)" -build_dir="$(realpath "${NPCOMP_BUILD_DIR:-$src_dir/build}")" -torch_ir_dir="${src_dir}/include/npcomp/Dialect/Torch/IR" - -source $src_dir/.env -#ninja -C "${build_dir}" -python -m torch_mlir_utils.codegen.torch_ods_gen \ - --torch_ir_dir="${torch_ir_dir}" \ - --debug_registry_dump="${torch_ir_dir}/JITOperatorRegistryDump.txt" diff --git a/build_tools/write_env_file.sh b/build_tools/write_env_file.sh index eeec63aab..e358a58f2 100755 --- a/build_tools/write_env_file.sh +++ b/build_tools/write_env_file.sh @@ -15,8 +15,7 @@ python_packages_dir="$build_dir/python_packages" write_env_file() { echo "Updating $build_dir/.env file" - echo "PYTHONPATH=\"$(portable_realpath "$python_packages_dir/npcomp_core"):$(portable_realpath "$python_packages_dir/npcomp_torch")\"" > "$build_dir/.env" - echo "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1" >> "$build_dir/.env" + echo "PYTHONPATH=\"$(portable_realpath "$python_packages_dir/npcomp_core"):$(portable_realpath "$python_packages_dir/npcomp_torch"):$(portable_realpath "$python_packages_dir/torch_mlir"):$(portable_realpath "$python_packages_dir/torch_mlir_dialects")\"" > "$build_dir/.env" if ! cp "$build_dir/.env" "$td/.env"; then echo "WARNING: Failed to write $td/.env" fi diff --git a/cmake/modules/NpcompPython.cmake b/cmake/modules/NpcompPython.cmake index 82e5c8c58..b3a9cbea2 100644 --- a/cmake/modules/NpcompPython.cmake +++ b/cmake/modules/NpcompPython.cmake @@ -1,18 +1,3 @@ -function(npcomp_python_target_compile_options target) - target_compile_options(${target} PRIVATE - $<$,$,$>: - # Enable RTTI and exceptions. - -frtti -fexceptions - # Noisy pybind warnings - -Wno-unused-value - -Wno-covered-switch-default - > - $<$: - # Enable RTTI and exceptions. 
- /EHsc /GR> - ) -endfunction() - function(npcomp_python_create_symlinks binary_dir source_dir) # Do nothing if building in-source if (${binary_dir} STREQUAL ${source_dir}) diff --git a/external/llvm-project b/external/llvm-project index 830c0b902..8dca953dd 160000 --- a/external/llvm-project +++ b/external/llvm-project @@ -1 +1 @@ -Subproject commit 830c0b9023cd0cf91955900e0d96283e7a8c3711 +Subproject commit 8dca953dd39c0cd8c80decbeb38753f58a4de580 diff --git a/external/torch-mlir/.gitignore b/external/torch-mlir/.gitignore new file mode 100644 index 000000000..796b96d1c --- /dev/null +++ b/external/torch-mlir/.gitignore @@ -0,0 +1 @@ +/build diff --git a/external/torch-mlir/CMakeLists.txt b/external/torch-mlir/CMakeLists.txt index c6afd0761..93104b55f 100644 --- a/external/torch-mlir/CMakeLists.txt +++ b/external/torch-mlir/CMakeLists.txt @@ -63,10 +63,14 @@ endif() add_subdirectory(include) add_subdirectory(lib) -add_subdirectory(test) add_subdirectory(tools) if(MLIR_ENABLE_BINDINGS_PYTHON) - #XXX: Enable - #add_subdirectory(python) + if(NOT TORCH_MLIR_PYTHON_PACKAGES_DIR) + set(TORCH_MLIR_PYTHON_PACKAGES_DIR "${CMAKE_CURRENT_BINARY_DIR}/python_packages") + endif() + add_subdirectory(TorchPlugin) + add_subdirectory(python) endif() + +add_subdirectory(test) diff --git a/external/torch-mlir/LICENSE b/external/torch-mlir/LICENSE new file mode 100644 index 000000000..d7e77e267 --- /dev/null +++ b/external/torch-mlir/LICENSE @@ -0,0 +1,235 @@ +============================================================================== +The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: +As an incubator project with ambition to become part of the LLVM Project, +CIRCT is under the same license. +============================================================================== + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. 
+ +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. diff --git a/external/torch-mlir/TorchPlugin/CMakeLists.txt b/external/torch-mlir/TorchPlugin/CMakeLists.txt new file mode 100644 index 000000000..15a2a7efa --- /dev/null +++ b/external/torch-mlir/TorchPlugin/CMakeLists.txt @@ -0,0 +1,41 @@ +#------------------------------------------------------------------------------- +# Sub project setup +#------------------------------------------------------------------------------- + +cmake_minimum_required(VERSION 3.13.4) + +if(POLICY CMP0068) + cmake_policy(SET CMP0068 NEW) + set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON) +endif() + +if(POLICY CMP0075) + cmake_policy(SET CMP0075 NEW) +endif() + +if(POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif() + +project(torch_mlir_plugin LANGUAGES CXX C) +set(CMAKE_C_STANDARD 11) +set(CMAKE_CXX_STANDARD 14) + +#------------------------------------------------------------------------------- +# Setup PyTorch +#------------------------------------------------------------------------------- + +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") +include(TorchMLIRPyTorch) +TorchMLIRProbeForPyTorchInstall() +find_package(Torch 1.8 REQUIRED) + +TorchMLIRConfigurePyTorch() + +#------------------------------------------------------------------------------- +# Subdirectories +#------------------------------------------------------------------------------- + +add_subdirectory(csrc) +add_subdirectory(python) +add_subdirectory(test) diff --git a/external/torch-mlir/TorchPlugin/LICENSE b/external/torch-mlir/TorchPlugin/LICENSE new file mode 100644 index 000000000..b6c3eaad6 --- /dev/null +++ b/external/torch-mlir/TorchPlugin/LICENSE @@ -0,0 +1,65 @@ +In order to facilitate future incorporation in pytorch, the code in this +directory (frontends/pytorch) is provided under the below license. + +Copyright (c) 2020 LLVM Foundation. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America + and IDIAP Research Institute nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +The design of this code is highly inspired by the design of the xla device for +pytorch (git@github.com:pytorch/xla.git). The license for pytorch/xla is: + +Copyright (c) 2018 Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America + and IDIAP Research Institute nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/frontends/pytorch/cmake/modules/NpcompPyTorch.cmake b/external/torch-mlir/TorchPlugin/cmake/modules/TorchMLIRPyTorch.cmake similarity index 81% rename from frontends/pytorch/cmake/modules/NpcompPyTorch.cmake rename to external/torch-mlir/TorchPlugin/cmake/modules/TorchMLIRPyTorch.cmake index 484d26375..0c3e8b224 100644 --- a/frontends/pytorch/cmake/modules/NpcompPyTorch.cmake +++ b/external/torch-mlir/TorchPlugin/cmake/modules/TorchMLIRPyTorch.cmake @@ -1,8 +1,8 @@ -# NpcompProbeForPyTorchInstall +# TorchMLIRProbeForPyTorchInstall # Attempts to find a Torch installation and set the Torch_ROOT variable # based on introspecting the python environment. This allows a subsequent # call to find_package(Torch) to work. -function(NpcompProbeForPyTorchInstall) +function(TorchMLIRProbeForPyTorchInstall) if(Torch_ROOT) message(STATUS "Using cached Torch root = ${Torch_ROOT}") else() @@ -24,7 +24,7 @@ function(NpcompProbeForPyTorchInstall) endif() endfunction() -# NpcompConfigurePyTorch +# TorchMLIRConfigurePyTorch # Performs configuration of PyTorch flags after CMake has found it to be # present. 
Most of this comes down to detecting whether building against a # source or official binary and adjusting compiler options in the latter case @@ -34,7 +34,7 @@ endfunction() # In the future, we may want to switch away from custom building these # extensions and instead rely on the Torch machinery directly (definitely want # to do that for official builds). -function(NpcompConfigurePyTorch) +function(TorchMLIRConfigurePyTorch) if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") # Linux specific libstdcpp ABI checking. message(STATUS "Checking if Torch is an official binary ...") @@ -58,3 +58,18 @@ function(NpcompConfigurePyTorch) endif() endif() endfunction() + +function(torch_mlir_python_target_compile_options target) + target_compile_options(${target} PRIVATE + $<$,$,$>: + # Enable RTTI and exceptions. + -frtti -fexceptions + # Noisy pybind warnings + -Wno-unused-value + -Wno-covered-switch-default + > + $<$: + # Enable RTTI and exceptions. + /EHsc /GR> + ) +endfunction() diff --git a/frontends/pytorch/csrc/CMakeLists.txt b/external/torch-mlir/TorchPlugin/csrc/CMakeLists.txt similarity index 68% rename from frontends/pytorch/csrc/CMakeLists.txt rename to external/torch-mlir/TorchPlugin/csrc/CMakeLists.txt index 759333af3..fc46bb1f6 100644 --- a/frontends/pytorch/csrc/CMakeLists.txt +++ b/external/torch-mlir/TorchPlugin/csrc/CMakeLists.txt @@ -1,8 +1,4 @@ -include(NpcompPython) - -# TODO: Add this to an npcomp header of some kind so it doesn't need to be -# passed loose. -add_compile_definitions("MLIR_PYTHON_PACKAGE_PREFIX=npcomp.") +# TODO: Add a way to configure MLIR_PYTHON_PACKAGE_PREFIX # Sharp edge: Torch extensions need to use the same pybind11 that torch # was compiled with, or else there will be issues in cross module exception @@ -16,7 +12,7 @@ include_directories(BEFORE ) link_directories("${TORCH_INSTALL_PREFIX}/lib") -add_library(NPCOMPTorchMLIRExt SHARED +add_library(TorchMLIRTorchPlugin SHARED builder/acap_dispatch.cpp builder/class_annotator.cpp builder/debug.cpp @@ -31,16 +27,16 @@ add_library(NPCOMPTorchMLIRExt SHARED init_python_bindings.cpp ) -target_link_libraries(NPCOMPTorchMLIRExt - NPCOMPPythonCAPI +target_link_libraries(TorchMLIRTorchPlugin + TorchMLIRAggregateCAPI ${TORCH_LIBRARIES} ${Python3_LIBRARIES} torch_python ) message(STATUS "TORCH_CXXFLAGS=${TORCH_CXXFLAGS}") -set_target_properties(NPCOMPTorchMLIRExt PROPERTIES - LIBRARY_OUTPUT_DIRECTORY "${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_torch" +set_target_properties(TorchMLIRTorchPlugin PROPERTIES + LIBRARY_OUTPUT_DIRECTORY "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir" OUTPUT_NAME _torch_mlir PREFIX "${PYTHON_MODULE_PREFIX}" SUFFIX "${PYTHON_MODULE_EXTENSION}" @@ -48,5 +44,5 @@ set_target_properties(NPCOMPTorchMLIRExt PROPERTIES COMPILE_FLAGS "${TORCH_CXXFLAGS}" ) -npcomp_python_target_compile_options(NPCOMPTorchMLIRExt) -mlir_check_all_link_libraries(NPCOMPTorchMLIRExt) +torch_mlir_python_target_compile_options(TorchMLIRTorchPlugin) +mlir_check_all_link_libraries(TorchMLIRTorchPlugin) diff --git a/frontends/pytorch/csrc/builder/acap_dispatch.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/acap_dispatch.cpp similarity index 99% rename from frontends/pytorch/csrc/builder/acap_dispatch.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/acap_dispatch.cpp index ceeaa28f1..cfe9b70e0 100644 --- a/frontends/pytorch/csrc/builder/acap_dispatch.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/acap_dispatch.cpp @@ -1,7 +1,7 @@ //===- acap_dispatch.cpp --------------------------------------------------===// // // 
This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// @@ -35,7 +35,6 @@ using c10::Stack; // own key. // TODO: Ask the PT devs why conv is special and only shows up if dispatching // through the autograd keys. -// https://github.com/llvm/mlir-npcomp/issues/86 #define ACAP_DISPATCH_KEY PrivateUse2 #define ACAP_GRAD_DISPATCH_KEY AutogradPrivateUse2 static c10::DispatchKey kAcapDispatchKey = c10::DispatchKey::ACAP_DISPATCH_KEY; diff --git a/frontends/pytorch/csrc/builder/acap_dispatch.h b/external/torch-mlir/TorchPlugin/csrc/builder/acap_dispatch.h similarity index 95% rename from frontends/pytorch/csrc/builder/acap_dispatch.h rename to external/torch-mlir/TorchPlugin/csrc/builder/acap_dispatch.h index 10d5a2d26..92cf33c80 100644 --- a/frontends/pytorch/csrc/builder/acap_dispatch.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/acap_dispatch.h @@ -1,7 +1,7 @@ //===- acap_dispatch.h ------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// // "ATen Capture" dispatcher: Defines facility for capturing programs by @@ -11,8 +11,8 @@ // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_ACAP_DISPATCH_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_ACAP_DISPATCH_H +#ifndef TORCHMLIRPLUGIN_CSRC_BUILDER_ACAP_DISPATCH_H +#define TORCHMLIRPLUGIN_CSRC_BUILDER_ACAP_DISPATCH_H #include #include @@ -128,4 +128,4 @@ private: } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_C10_DISPATCH_ACAP_DISPATCH_H +#endif // TORCHMLIRPLUGIN_CSRC_C10_DISPATCH_ACAP_DISPATCH_H diff --git a/frontends/pytorch/csrc/builder/class_annotator.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/class_annotator.cpp similarity index 99% rename from frontends/pytorch/csrc/builder/class_annotator.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/class_annotator.cpp index 2ed487020..e38f52a6e 100644 --- a/frontends/pytorch/csrc/builder/class_annotator.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/class_annotator.cpp @@ -1,7 +1,7 @@ //===- class_annotator.cpp ------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/class_annotator.h b/external/torch-mlir/TorchPlugin/csrc/builder/class_annotator.h similarity index 97% rename from frontends/pytorch/csrc/builder/class_annotator.h rename to external/torch-mlir/TorchPlugin/csrc/builder/class_annotator.h index 27f30620b..8960a5577 100644 --- a/frontends/pytorch/csrc/builder/class_annotator.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/class_annotator.h @@ -1,7 +1,7 @@ //===- class_annotations.h --------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// // Utilities for annotating Torch `c10::ClassType` @@ -18,8 +18,8 @@ // of implementation. //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_CLASS_ANNOTATOR_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_CLASS_ANNOTATOR_H +#ifndef TORCHMLIRPLUGIN_CSRC_CLASS_ANNOTATOR_H +#define TORCHMLIRPLUGIN_CSRC_CLASS_ANNOTATOR_H #include "../pybind.h" @@ -192,4 +192,4 @@ void initClassAnnotatorBindings(py::module &m); } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_CLASS_ANNOTATOR_H +#endif // TORCHMLIRPLUGIN_CSRC_CLASS_ANNOTATOR_H diff --git a/frontends/pytorch/csrc/builder/debug.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/debug.cpp similarity index 93% rename from frontends/pytorch/csrc/builder/debug.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/debug.cpp index 3c8c6c5af..1d09520f3 100644 --- a/frontends/pytorch/csrc/builder/debug.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/debug.cpp @@ -1,7 +1,7 @@ //===- debug.cpp ------------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/debug.h b/external/torch-mlir/TorchPlugin/csrc/builder/debug.h similarity index 72% rename from frontends/pytorch/csrc/builder/debug.h rename to external/torch-mlir/TorchPlugin/csrc/builder/debug.h index cd2bc799a..643e29b97 100644 --- a/frontends/pytorch/csrc/builder/debug.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/debug.h @@ -1,12 +1,12 @@ //===- debug.h --------------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_DEBUG_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_DEBUG_H +#ifndef TORCHMLIRPLUGIN_CSRC_BUILDER_DEBUG_H +#define TORCHMLIRPLUGIN_CSRC_BUILDER_DEBUG_H #include @@ -24,4 +24,4 @@ void enableDebugTraceToStderr(); } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_DEBUG_H +#endif // TORCHMLIRPLUGIN_CSRC_BUILDER_DEBUG_H diff --git a/frontends/pytorch/csrc/builder/func_builder.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/func_builder.cpp similarity index 99% rename from frontends/pytorch/csrc/builder/func_builder.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/func_builder.cpp index 0910444aa..e2f0b34c4 100644 --- a/frontends/pytorch/csrc/builder/func_builder.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/func_builder.cpp @@ -1,7 +1,7 @@ //===- func_builder.cpp ---------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/func_builder.h b/external/torch-mlir/TorchPlugin/csrc/builder/func_builder.h similarity index 95% rename from frontends/pytorch/csrc/builder/func_builder.h rename to external/torch-mlir/TorchPlugin/csrc/builder/func_builder.h index 120b1d30b..828dd124a 100644 --- a/frontends/pytorch/csrc/builder/func_builder.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/func_builder.h @@ -1,12 +1,12 @@ //===- func_builder.h -------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_FUNC_BUILDER_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_FUNC_BUILDER_H +#ifndef TORCHMLIRPLUGIN_CSRC_BUILDER_FUNC_BUILDER_H +#define TORCHMLIRPLUGIN_CSRC_BUILDER_FUNC_BUILDER_H #include "mlir_utils.h" #include "torch_to_mlir_utils.h" @@ -150,4 +150,4 @@ private: } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_FUNC_BUILDER_H +#endif // TORCHMLIRPLUGIN_CSRC_BUILDER_FUNC_BUILDER_H diff --git a/frontends/pytorch/csrc/builder/function_importer.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/function_importer.cpp similarity index 97% rename from frontends/pytorch/csrc/builder/function_importer.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/function_importer.cpp index c9a7a32a3..aa3a2daaa 100644 --- a/frontends/pytorch/csrc/builder/function_importer.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/function_importer.cpp @@ -1,7 +1,7 @@ //===- function_importer.cpp ----------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/function_importer.h b/external/torch-mlir/TorchPlugin/csrc/builder/function_importer.h similarity index 86% rename from frontends/pytorch/csrc/builder/function_importer.h rename to external/torch-mlir/TorchPlugin/csrc/builder/function_importer.h index 9a052e8d6..4aa8a40d4 100644 --- a/frontends/pytorch/csrc/builder/function_importer.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/function_importer.h @@ -1,12 +1,12 @@ //===- function_importer.h --------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_FUNCTION_IMPORTER_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_FUNCTION_IMPORTER_H +#ifndef TORCHMLIRPLUGIN_CSRC_FUNCTION_IMPORTER_H +#define TORCHMLIRPLUGIN_CSRC_FUNCTION_IMPORTER_H #include @@ -47,4 +47,4 @@ MlirOperation importJitFunctionAsFuncOp( } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_FUNCTION_IMPORTER_H +#endif // TORCHMLIRPLUGIN_CSRC_FUNCTION_IMPORTER_H diff --git a/frontends/pytorch/csrc/builder/ivalue_importer.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/ivalue_importer.cpp similarity index 99% rename from frontends/pytorch/csrc/builder/ivalue_importer.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/ivalue_importer.cpp index 163063afe..5f42dd3d2 100644 --- a/frontends/pytorch/csrc/builder/ivalue_importer.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/ivalue_importer.cpp @@ -1,7 +1,7 @@ //===- ivalue_importer.cpp ------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/ivalue_importer.h b/external/torch-mlir/TorchPlugin/csrc/builder/ivalue_importer.h similarity index 77% rename from frontends/pytorch/csrc/builder/ivalue_importer.h rename to external/torch-mlir/TorchPlugin/csrc/builder/ivalue_importer.h index 822b08d3c..3ce5da69d 100644 --- a/frontends/pytorch/csrc/builder/ivalue_importer.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/ivalue_importer.h @@ -1,12 +1,12 @@ //===- ivalue_importer.h ----------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_IVALUE_IMPORTER_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_IVALUE_IMPORTER_H +#ifndef TORCHMLIRPLUGIN_CSRC_IVALUE_IMPORTER_H +#define TORCHMLIRPLUGIN_CSRC_IVALUE_IMPORTER_H #include @@ -29,4 +29,4 @@ void importIValue(c10::IValue ivalue, MlirBlock block, MlirContext context, } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_IVALUE_IMPORTER_H +#endif // TORCHMLIRPLUGIN_CSRC_IVALUE_IMPORTER_H diff --git a/frontends/pytorch/csrc/builder/mlir_utils.h b/external/torch-mlir/TorchPlugin/csrc/builder/mlir_utils.h similarity index 94% rename from frontends/pytorch/csrc/builder/mlir_utils.h rename to external/torch-mlir/TorchPlugin/csrc/builder/mlir_utils.h index 4c629e663..712e8fa6a 100644 --- a/frontends/pytorch/csrc/builder/mlir_utils.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/mlir_utils.h @@ -1,12 +1,12 @@ //===- mlir_utils.h ---------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_MLIR_UTILS_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_MLIR_UTILS_H +#ifndef TORCHMLIRPLUGIN_CSRC_MLIR_UTILS_H +#define TORCHMLIRPLUGIN_CSRC_MLIR_UTILS_H #include #include @@ -111,4 +111,4 @@ MlirOperation createMlirOperationAtEnd(MlirBlock block, std::string name, } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_MLIR_UTILS_H +#endif // TORCHMLIRPLUGIN_CSRC_MLIR_UTILS_H diff --git a/frontends/pytorch/csrc/builder/module_builder.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/module_builder.cpp similarity index 98% rename from frontends/pytorch/csrc/builder/module_builder.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/module_builder.cpp index d5190ad3d..3063497d1 100644 --- a/frontends/pytorch/csrc/builder/module_builder.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/module_builder.cpp @@ -1,7 +1,7 @@ //===- module_builder.cpp -------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// @@ -15,7 +15,6 @@ #include "mlir-c/BuiltinTypes.h" #include "mlir-c/Diagnostics.h" #include "mlir-c/Registration.h" -#include "npcomp-c/Registration.h" #include "torch-mlir-c/Registration.h" namespace py = pybind11; diff --git a/frontends/pytorch/csrc/builder/module_builder.h b/external/torch-mlir/TorchPlugin/csrc/builder/module_builder.h similarity index 90% rename from frontends/pytorch/csrc/builder/module_builder.h rename to external/torch-mlir/TorchPlugin/csrc/builder/module_builder.h index 8511fb4b6..bffbf0b7d 100644 --- a/frontends/pytorch/csrc/builder/module_builder.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/module_builder.h @@ -1,12 +1,12 @@ //===- module_builder.h -----------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_H +#ifndef TORCHMLIRPLUGIN_CSRC_BUILDER_H +#define TORCHMLIRPLUGIN_CSRC_BUILDER_H #include "../pybind.h" @@ -70,4 +70,4 @@ private: } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_C10_DISPATCH_MODULE_BUILDER_H +#endif // TORCHMLIRPLUGIN_CSRC_C10_DISPATCH_MODULE_BUILDER_H diff --git a/frontends/pytorch/csrc/builder/node_importer.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/node_importer.cpp similarity index 99% rename from frontends/pytorch/csrc/builder/node_importer.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/node_importer.cpp index bfbb08c1d..abf3db1d9 100644 --- a/frontends/pytorch/csrc/builder/node_importer.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/node_importer.cpp @@ -1,7 +1,7 @@ //===- node_importer.cpp --------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/node_importer.h b/external/torch-mlir/TorchPlugin/csrc/builder/node_importer.h similarity index 75% rename from frontends/pytorch/csrc/builder/node_importer.h rename to external/torch-mlir/TorchPlugin/csrc/builder/node_importer.h index 26ff1678d..86ac920e5 100644 --- a/frontends/pytorch/csrc/builder/node_importer.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/node_importer.h @@ -1,12 +1,12 @@ //===- node_importer.h ------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_NODE_IMPORTER_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_NODE_IMPORTER_H +#ifndef TORCHMLIRPLUGIN_CSRC_NODE_IMPORTER_H +#define TORCHMLIRPLUGIN_CSRC_NODE_IMPORTER_H #include @@ -28,4 +28,4 @@ MlirBlock importBlock(MlirContext context, torch::jit::Block *jitBlock, } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_NODE_IMPORTER_H +#endif // TORCHMLIRPLUGIN_CSRC_NODE_IMPORTER_H diff --git a/frontends/pytorch/csrc/builder/op_builder.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/op_builder.cpp similarity index 94% rename from frontends/pytorch/csrc/builder/op_builder.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/op_builder.cpp index 9add31a31..fcc765714 100644 --- a/frontends/pytorch/csrc/builder/op_builder.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/op_builder.cpp @@ -1,7 +1,7 @@ //===- op_builder.cpp -----------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/op_builder.h b/external/torch-mlir/TorchPlugin/csrc/builder/op_builder.h similarity index 80% rename from frontends/pytorch/csrc/builder/op_builder.h rename to external/torch-mlir/TorchPlugin/csrc/builder/op_builder.h index 4f4156890..6a2f88b32 100644 --- a/frontends/pytorch/csrc/builder/op_builder.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/op_builder.h @@ -1,12 +1,12 @@ //===- op_builder.h ---------------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_OP_BUILDER_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_OP_BUILDER_H +#ifndef TORCHMLIRPLUGIN_CSRC_BUILDER_OP_BUILDER_H +#define TORCHMLIRPLUGIN_CSRC_BUILDER_OP_BUILDER_H #include "mlir_utils.h" #include "torch_to_mlir_utils.h" @@ -37,4 +37,4 @@ private: } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_BUILDER_OP_BUILDER_H +#endif // TORCHMLIRPLUGIN_CSRC_BUILDER_OP_BUILDER_H diff --git a/frontends/pytorch/csrc/builder/python_bindings.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/python_bindings.cpp similarity index 98% rename from frontends/pytorch/csrc/builder/python_bindings.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/python_bindings.cpp index 8053b5c59..ec7b81715 100644 --- a/frontends/pytorch/csrc/builder/python_bindings.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/python_bindings.cpp @@ -1,7 +1,7 @@ //===- python_bindings.cpp --------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/builder/torch_to_mlir_utils.cpp b/external/torch-mlir/TorchPlugin/csrc/builder/torch_to_mlir_utils.cpp similarity index 98% rename from frontends/pytorch/csrc/builder/torch_to_mlir_utils.cpp rename to external/torch-mlir/TorchPlugin/csrc/builder/torch_to_mlir_utils.cpp index 6a1c0147f..d0b225166 100644 --- a/frontends/pytorch/csrc/builder/torch_to_mlir_utils.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/builder/torch_to_mlir_utils.cpp @@ -1,7 +1,7 @@ //===- ivalue_importer.cpp ------------------------------------------------===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// @@ -323,9 +323,6 @@ MlirAttribute torch_mlir::convertTensorToMlirElementsAttr(at::Tensor tensor, shapedType, numElements, static_cast(tensorData)); break; case ScalarType::Bool: - // TODO: Add a test specifically for bool and ensure consistency between - // storage format and load format - // (https://github.com/llvm/mlir-npcomp/issues/100). return mlirDenseElementsAttrBoolGet(shapedType, numElements, static_cast(tensorData)); break; diff --git a/frontends/pytorch/csrc/builder/torch_to_mlir_utils.h b/external/torch-mlir/TorchPlugin/csrc/builder/torch_to_mlir_utils.h similarity index 94% rename from frontends/pytorch/csrc/builder/torch_to_mlir_utils.h rename to external/torch-mlir/TorchPlugin/csrc/builder/torch_to_mlir_utils.h index ccbd70993..9e79b63e5 100644 --- a/frontends/pytorch/csrc/builder/torch_to_mlir_utils.h +++ b/external/torch-mlir/TorchPlugin/csrc/builder/torch_to_mlir_utils.h @@ -1,12 +1,12 @@ //===- torch_to_mlir_utils.h ------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. 
// //===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_TORCH_TO_MLIR_UTILS_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_TORCH_TO_MLIR_UTILS_H +#ifndef TORCHMLIRPLUGIN_CSRC_TORCH_TO_MLIR_UTILS_H +#define TORCHMLIRPLUGIN_CSRC_TORCH_TO_MLIR_UTILS_H #include @@ -98,4 +98,4 @@ MlirOperation createOperationFromSchema(MlirBlock appendToBlock, } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_TORCH_TO_MLIR_UTILS_H +#endif // TORCHMLIRPLUGIN_CSRC_TORCH_TO_MLIR_UTILS_H diff --git a/frontends/pytorch/csrc/init_python_bindings.cpp b/external/torch-mlir/TorchPlugin/csrc/init_python_bindings.cpp similarity index 54% rename from frontends/pytorch/csrc/init_python_bindings.cpp rename to external/torch-mlir/TorchPlugin/csrc/init_python_bindings.cpp index df5116c62..8649b8432 100644 --- a/frontends/pytorch/csrc/init_python_bindings.cpp +++ b/external/torch-mlir/TorchPlugin/csrc/init_python_bindings.cpp @@ -1,16 +1,11 @@ //===- init_python_bindings.cpp ---------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// -// This is the top-level entry point for the MLIR/NPCOMP <-> PyTorch bridge. -// It provides several mechanisms for extracting programs from PyTorch via: -// a) A pseudo-device which captures the operations to an MLIR module -// (implemented via the legacy type_dispatch mechanism for PyTorch 1.3). -// b) Direct IR translation from PyTorch Graphs (not implemented). -// c) Using the PyTorch JIT facility (not implemented). +// This is the top-level entry point for the MLIR <-> PyTorch bridge. #include "init_python_bindings.h" diff --git a/frontends/pytorch/csrc/init_python_bindings.h b/external/torch-mlir/TorchPlugin/csrc/init_python_bindings.h similarity index 90% rename from frontends/pytorch/csrc/init_python_bindings.h rename to external/torch-mlir/TorchPlugin/csrc/init_python_bindings.h index c6c3806ca..7601c046f 100644 --- a/frontends/pytorch/csrc/init_python_bindings.h +++ b/external/torch-mlir/TorchPlugin/csrc/init_python_bindings.h @@ -1,7 +1,7 @@ //===- init_python_bindings.h -----------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// diff --git a/frontends/pytorch/csrc/pybind.h b/external/torch-mlir/TorchPlugin/csrc/pybind.h similarity index 80% rename from frontends/pytorch/csrc/pybind.h rename to external/torch-mlir/TorchPlugin/csrc/pybind.h index 00940773e..358213cb9 100644 --- a/frontends/pytorch/csrc/pybind.h +++ b/external/torch-mlir/TorchPlugin/csrc/pybind.h @@ -1,7 +1,7 @@ //===- module_builder.h -----------------------------------------*- C++ -*-===// // // This file is licensed under a pytorch-style license -// See frontends/pytorch/LICENSE for license information. +// See LICENSE for license information. // //===----------------------------------------------------------------------===// // Includes Torch-specific pybind and associated helpers. @@ -9,8 +9,8 @@ // directly). 
//===----------------------------------------------------------------------===// -#ifndef NPCOMP_FRONTENDS_PYTORCH_CSRC_PYBIND_H -#define NPCOMP_FRONTENDS_PYTORCH_CSRC_PYBIND_H +#ifndef TORCHMLIRPLUGIN_CSRC_PYBIND_H +#define TORCHMLIRPLUGIN_CSRC_PYBIND_H #include @@ -25,4 +25,4 @@ public: } // namespace torch_mlir -#endif // NPCOMP_FRONTENDS_PYTORCH_CSRC_PYBIND_H +#endif // TORCHMLIRPLUGIN_CSRC_PYBIND_H diff --git a/external/torch-mlir/TorchPlugin/python/CMakeLists.txt b/external/torch-mlir/TorchPlugin/python/CMakeLists.txt new file mode 100644 index 000000000..942b45977 --- /dev/null +++ b/external/torch-mlir/TorchPlugin/python/CMakeLists.txt @@ -0,0 +1,24 @@ + +## Declare the sources of the Python module. + +declare_mlir_python_sources(TorchMLIRPluginPythonSources) + +declare_mlir_python_sources(TorchMLIRPluginPythonSources.Core + ADD_TO_PARENT TorchMLIRPluginPythonSources + SOURCES_GLOB + torch_mlir/*.py + torch_mlir_utils/codegen/*.py +) + +set(_source_components + TorchMLIRPluginPythonSources +) + +add_mlir_python_modules(TorchMLIRPluginPythonModules + ROOT_PREFIX "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir" + INSTALL_PREFIX "python_packages/torch_mlir" + DECLARED_SOURCES ${_source_components} + COMMON_CAPI_LINK_LIBS + TorchMLIRAggregateCAPI + ) +add_dependencies(TorchMLIRPluginPythonModules TorchMLIRTorchPlugin) diff --git a/frontends/pytorch/python/torch_mlir/__init__.py b/external/torch-mlir/TorchPlugin/python/torch_mlir/__init__.py similarity index 94% rename from frontends/pytorch/python/torch_mlir/__init__.py rename to external/torch-mlir/TorchPlugin/python/torch_mlir/__init__.py index d58b3ee4a..7dad01b71 100644 --- a/frontends/pytorch/python/torch_mlir/__init__.py +++ b/external/torch-mlir/TorchPlugin/python/torch_mlir/__init__.py @@ -7,7 +7,6 @@ # prior to loading shared objects. import torch -import npcomp._mlir_libs._npcomp # Our native extension is not self-contained. It references libraries which # must come in via the above first. diff --git a/frontends/pytorch/python/torch_mlir/torchscript_annotations.py b/external/torch-mlir/TorchPlugin/python/torch_mlir/torchscript_annotations.py similarity index 93% rename from frontends/pytorch/python/torch_mlir/torchscript_annotations.py rename to external/torch-mlir/TorchPlugin/python/torch_mlir/torchscript_annotations.py index e45da0097..526191ac1 100644 --- a/frontends/pytorch/python/torch_mlir/torchscript_annotations.py +++ b/external/torch-mlir/TorchPlugin/python/torch_mlir/torchscript_annotations.py @@ -34,12 +34,12 @@ def _recursively_extract_annotations( if not isinstance(scripted_method, torch.ScriptMethod): continue method = getattr(module, method_name) - if hasattr(method, '_npcomp_export'): + if hasattr(method, '_torch_mlir_export'): class_annotator.exportPath(scripted._c._type(), [method_name]) - if hasattr(method, '_npcomp_arg_annotations'): + if hasattr(method, '_torch_mlir_arg_annotations'): class_annotator.annotateArgs( scripted._c._type(), [method_name], - method._npcomp_arg_annotations) + method._torch_mlir_arg_annotations) # Recurse. 
for name, child in module.named_children(): scripted_child = getattr(scripted, name) diff --git a/frontends/pytorch/python/torch_mlir_utils/codegen/__init__.py b/external/torch-mlir/TorchPlugin/python/torch_mlir_utils/codegen/__init__.py similarity index 100% rename from frontends/pytorch/python/torch_mlir_utils/codegen/__init__.py rename to external/torch-mlir/TorchPlugin/python/torch_mlir_utils/codegen/__init__.py diff --git a/frontends/pytorch/python/torch_mlir_utils/codegen/torch_ods_gen.py b/external/torch-mlir/TorchPlugin/python/torch_mlir_utils/codegen/torch_ods_gen.py similarity index 100% rename from frontends/pytorch/python/torch_mlir_utils/codegen/torch_ods_gen.py rename to external/torch-mlir/TorchPlugin/python/torch_mlir_utils/codegen/torch_ods_gen.py diff --git a/frontends/pytorch/setup.py b/external/torch-mlir/TorchPlugin/setup.py similarity index 90% rename from frontends/pytorch/setup.py rename to external/torch-mlir/TorchPlugin/setup.py index d8d6cf9a1..404f4b715 100644 --- a/frontends/pytorch/setup.py +++ b/external/torch-mlir/TorchPlugin/setup.py @@ -22,9 +22,8 @@ extension_sources = [str(p) for p in this_dir.joinpath("csrc").rglob("*.cpp")] include_dirs = npcomp_build.get_include_dirs() lib_dirs = npcomp_build.get_lib_dirs() npcomp_libs = [npcomp_build.get_capi_link_library_name()] -# TODO: Export this in some way from an npcomp config file include vs needing -# it loose here. -compile_args = ["-DMLIR_PYTHON_PACKAGE_PREFIX=npcomp."] +# TODO: Add a way to customize MLIR_PYTHON_PACKAGE_PREFIX +compile_args = [] setup( name="npcomp-torch", diff --git a/external/torch-mlir/TorchPlugin/test/CMakeLists.txt b/external/torch-mlir/TorchPlugin/test/CMakeLists.txt new file mode 100644 index 000000000..aecd25c8b --- /dev/null +++ b/external/torch-mlir/TorchPlugin/test/CMakeLists.txt @@ -0,0 +1,21 @@ +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in + ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py + MAIN_CONFIG + ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py +) + +set(TEST_DEPENDS + FileCheck count not + torch-mlir-opt + TorchMLIRTorchPlugin + ) + +add_lit_testsuite(check-torch-mlir-plugin + "Running the torch-mlir-plugin regression tests" + ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${TEST_DEPENDS} + ) +set_target_properties(check-torch-mlir-plugin PROPERTIES FOLDER "Tests") + +add_lit_testsuites(TORCH_MLIR_PLUGIN ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${TEST_DEPENDS}) diff --git a/frontends/pytorch/test/acap_export/test_arange.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_arange.py similarity index 81% rename from frontends/pytorch/test/acap_export/test_arange.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_arange.py index c855c97d7..cc9c2bf4e 100644 --- a/frontends/pytorch/test/acap_export/test_arange.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_arange.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
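The add_lit_testsuite call above wires the plugin tests into a dedicated `check-torch-mlir-plugin` target, so the suite can be run on its own once a build is configured. A sketch, assuming a Ninja build directory named `build`:

```shell
# Build-directory name is illustrative.
ninja -C build check-torch-mlir-plugin
```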
import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s torch_mlir.debug_trace_to_stderr() mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/acap_export/test_conv_nllloss_grads.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_conv_nllloss_grads.py similarity index 93% rename from frontends/pytorch/test/acap_export/test_conv_nllloss_grads.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_conv_nllloss_grads.py index d9f7c9aa1..900b53757 100644 --- a/frontends/pytorch/test/acap_export/test_conv_nllloss_grads.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_conv_nllloss_grads.py @@ -1,8 +1,8 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s # XFAIL: * import torch diff --git a/frontends/pytorch/test/acap_export/test_export_add3.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_add3.py similarity index 94% rename from frontends/pytorch/test/acap_export/test_export_add3.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_export_add3.py index 4ebc015d7..87565809a 100644 --- a/frontends/pytorch/test/acap_export/test_export_add3.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_add3.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s t0 = torch.randn((1,2,3,4)) t1 = torch.randn((1,2,3,4)) diff --git a/frontends/pytorch/test/acap_export/test_export_batchnorm.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_batchnorm.py similarity index 86% rename from frontends/pytorch/test/acap_export/test_export_batchnorm.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_export_batchnorm.py index 584882cd1..ed7513386 100644 --- a/frontends/pytorch/test/acap_export/test_export_batchnorm.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_batchnorm.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/acap_export/test_export_cat.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_cat.py similarity index 91% rename from frontends/pytorch/test/acap_export/test_export_cat.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_export_cat.py index 723e4fa88..6e9de8c1b 100644 --- a/frontends/pytorch/test/acap_export/test_export_cat.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_cat.py @@ -1,8 +1,8 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
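The RUN-line updates in these tests all follow the same pattern: the script prints an MLIR module to stdout, pipes it through torch-mlir-opt, and FileCheck verifies the output. A minimal sketch of such a test, using the ModuleBuilder capture API shown elsewhere in this change; the CHECK line is a placeholder rather than the exact IR.

```python
# -*- Python -*-
# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s
# (Illustrative test; the CHECK pattern below is a placeholder.)

import torch
import torch_mlir

mb = torch_mlir.ModuleBuilder()
t0 = torch.randn(2, 3)
t1 = torch.randn(2, 3)
with mb.capture_function("add", [t0, t1]) as f:
    f.returns([t0 + t1])

# CHECK: func @add
mb.module.operation.print()
```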
-# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s import torch import torch.nn as nn diff --git a/frontends/pytorch/test/acap_export/test_export_conv2d_fwd.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_conv2d_fwd.py similarity index 96% rename from frontends/pytorch/test/acap_export/test_export_conv2d_fwd.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_export_conv2d_fwd.py index 14c30f810..668e79b89 100644 --- a/frontends/pytorch/test/acap_export/test_export_conv2d_fwd.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_conv2d_fwd.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/acap_export/test_export_multi_out.py b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_multi_out.py similarity index 86% rename from frontends/pytorch/test/acap_export/test_export_multi_out.py rename to external/torch-mlir/TorchPlugin/test/acap_export/test_export_multi_out.py index 200c7450e..fe247edb2 100644 --- a/frontends/pytorch/test/acap_export/test_export_multi_out.py +++ b/external/torch-mlir/TorchPlugin/test/acap_export/test_export_multi_out.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/builder/get_registered_ops.py b/external/torch-mlir/TorchPlugin/test/builder/get_registered_ops.py similarity index 91% rename from frontends/pytorch/test/builder/get_registered_ops.py rename to external/torch-mlir/TorchPlugin/test/builder/get_registered_ops.py index f33874ad2..fdadac96b 100644 --- a/frontends/pytorch/test/builder/get_registered_ops.py +++ b/external/torch-mlir/TorchPlugin/test/builder/get_registered_ops.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. # RUN: %PYTHON %s | FileCheck %s import _torch_mlir diff --git a/frontends/pytorch/test/builder/simple_acap_e2e.py b/external/torch-mlir/TorchPlugin/test/builder/simple_acap_e2e.py similarity index 85% rename from frontends/pytorch/test/builder/simple_acap_e2e.py rename to external/torch-mlir/TorchPlugin/test/builder/simple_acap_e2e.py index a3d1c2934..6b27873bf 100644 --- a/frontends/pytorch/test/builder/simple_acap_e2e.py +++ b/external/torch-mlir/TorchPlugin/test/builder/simple_acap_e2e.py @@ -1,8 +1,8 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. -# RUN: %PYTHON %s | npcomp-opt -aten-recognize-kernels -numpy-public-functions-to-tensor -canonicalize | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt -aten-recognize-kernels -numpy-public-functions-to-tensor -canonicalize | FileCheck %s # TODO: Re-enable after adding support for 4-operand aten::add in `aten-recognize-kernels`. 
# XFAIL: * diff --git a/frontends/pytorch/test/extension_coexistence.py b/external/torch-mlir/TorchPlugin/test/extension_coexistence.py similarity index 80% rename from frontends/pytorch/test/extension_coexistence.py rename to external/torch-mlir/TorchPlugin/test/extension_coexistence.py index 838b9ac59..32bc98119 100644 --- a/frontends/pytorch/test/extension_coexistence.py +++ b/external/torch-mlir/TorchPlugin/test/extension_coexistence.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. # Some checks that we can import the various extensions and libraries and # not have symbol collisions or other goings on. @@ -10,7 +10,7 @@ import sys print(f"PYTHONPATH={sys.path}") -import npcomp.ir +import mlir.ir import torch_mlir print("Extensions all loaded") diff --git a/frontends/pytorch/test/ivalue_import/README.md b/external/torch-mlir/TorchPlugin/test/ivalue_import/README.md similarity index 100% rename from frontends/pytorch/test/ivalue_import/README.md rename to external/torch-mlir/TorchPlugin/test/ivalue_import/README.md diff --git a/frontends/pytorch/test/ivalue_import/annotations/arg-error.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/arg-error.py similarity index 95% rename from frontends/pytorch/test/ivalue_import/annotations/arg-error.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/arg-error.py index 6e3c5cee7..3fda696f1 100644 --- a/frontends/pytorch/test/ivalue_import/annotations/arg-error.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/arg-error.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing diff --git a/frontends/pytorch/test/ivalue_import/annotations/arg-tensor-type-bound.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/arg-tensor-type-bound.py similarity index 91% rename from frontends/pytorch/test/ivalue_import/annotations/arg-tensor-type-bound.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/arg-tensor-type-bound.py index 1b46128ec..cd9f1ba52 100644 --- a/frontends/pytorch/test/ivalue_import/annotations/arg-tensor-type-bound.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/arg-tensor-type-bound.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/annotations/class-annotator-repr.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/class-annotator-repr.py similarity index 98% rename from frontends/pytorch/test/ivalue_import/annotations/class-annotator-repr.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/class-annotator-repr.py index fdbbbcaa7..7e62d2c0a 100644 --- a/frontends/pytorch/test/ivalue_import/annotations/class-annotator-repr.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/class-annotator-repr.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing diff --git a/frontends/pytorch/test/ivalue_import/annotations/export-error.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export-error.py similarity index 95% rename from frontends/pytorch/test/ivalue_import/annotations/export-error.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export-error.py index e27acb19d..55979f770 100644 --- a/frontends/pytorch/test/ivalue_import/annotations/export-error.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export-error.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing diff --git a/frontends/pytorch/test/ivalue_import/annotations/export-recursive.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export-recursive.py similarity index 93% rename from frontends/pytorch/test/ivalue_import/annotations/export-recursive.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export-recursive.py index a413ed478..24dd4e056 100644 --- a/frontends/pytorch/test/ivalue_import/annotations/export-recursive.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export-recursive.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/annotations/export.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export.py similarity index 92% rename from frontends/pytorch/test/ivalue_import/annotations/export.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export.py index 986807785..138e10c7c 100644 --- a/frontends/pytorch/test/ivalue_import/annotations/export.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/annotations/export.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/dict.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/dict.py similarity index 92% rename from frontends/pytorch/test/ivalue_import/dict.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/dict.py index b90988d59..6b95251ef 100644 --- a/frontends/pytorch/test/ivalue_import/dict.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/dict.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. from typing import Dict, Optional import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/functions-that-call-methods.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/functions-that-call-methods.py similarity index 95% rename from frontends/pytorch/test/ivalue_import/functions-that-call-methods.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/functions-that-call-methods.py index 18a563cdb..ce2e664a2 100644 --- a/frontends/pytorch/test/ivalue_import/functions-that-call-methods.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/functions-that-call-methods.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/functions.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/functions.py similarity index 93% rename from frontends/pytorch/test/ivalue_import/functions.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/functions.py index bc85c6b24..cf49bef0f 100644 --- a/frontends/pytorch/test/ivalue_import/functions.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/functions.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/list.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/list.py similarity index 90% rename from frontends/pytorch/test/ivalue_import/list.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/list.py index 1c28f7120..6372f5ab8 100644 --- a/frontends/pytorch/test/ivalue_import/list.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/list.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/methods-derefine.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/methods-derefine.py similarity index 92% rename from frontends/pytorch/test/ivalue_import/methods-derefine.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/methods-derefine.py index d2b008df7..09c8f9b9d 100644 --- a/frontends/pytorch/test/ivalue_import/methods-derefine.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/methods-derefine.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/methods-locations.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/methods-locations.py similarity index 92% rename from frontends/pytorch/test/ivalue_import/methods-locations.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/methods-locations.py index ce841b19d..e8fc084f8 100644 --- a/frontends/pytorch/test/ivalue_import/methods-locations.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/methods-locations.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing diff --git a/frontends/pytorch/test/ivalue_import/methods.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/methods.py similarity index 93% rename from frontends/pytorch/test/ivalue_import/methods.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/methods.py index b9a5b68ae..07804139e 100644 --- a/frontends/pytorch/test/ivalue_import/methods.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/methods.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/object-identity-error-submodule.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-error-submodule.py similarity index 94% rename from frontends/pytorch/test/ivalue_import/object-identity-error-submodule.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-error-submodule.py index df3146863..4fc3aaf1e 100644 --- a/frontends/pytorch/test/ivalue_import/object-identity-error-submodule.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-error-submodule.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import typing diff --git a/frontends/pytorch/test/ivalue_import/object-identity-error.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-error.py similarity index 93% rename from frontends/pytorch/test/ivalue_import/object-identity-error.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-error.py index 2c27c1b85..e93702578 100644 --- a/frontends/pytorch/test/ivalue_import/object-identity-error.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-error.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing diff --git a/frontends/pytorch/test/ivalue_import/object-identity-torch-bug.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-torch-bug.py similarity index 93% rename from frontends/pytorch/test/ivalue_import/object-identity-torch-bug.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-torch-bug.py index c77169195..60ab092cd 100644 --- a/frontends/pytorch/test/ivalue_import/object-identity-torch-bug.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity-torch-bug.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/object-identity.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity.py similarity index 87% rename from frontends/pytorch/test/ivalue_import/object-identity.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity.py index 5a450379b..bdf5e9c39 100644 --- a/frontends/pytorch/test/ivalue_import/object-identity.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/object-identity.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/prim.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/prim.py similarity index 92% rename from frontends/pytorch/test/ivalue_import/prim.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/prim.py index 66b0979a8..c3333c34b 100644 --- a/frontends/pytorch/test/ivalue_import/prim.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/prim.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/primitives.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/primitives.py similarity index 92% rename from frontends/pytorch/test/ivalue_import/primitives.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/primitives.py index 160609f93..561004d56 100644 --- a/frontends/pytorch/test/ivalue_import/primitives.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/primitives.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/quantization.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/quantization.py similarity index 94% rename from frontends/pytorch/test/ivalue_import/quantization.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/quantization.py index 2d82bb377..bd9df8b88 100644 --- a/frontends/pytorch/test/ivalue_import/quantization.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/quantization.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/strings.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/strings.py similarity index 88% rename from frontends/pytorch/test/ivalue_import/strings.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/strings.py index cae5795a3..4cffa1ed5 100644 --- a/frontends/pytorch/test/ivalue_import/strings.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/strings.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/submodules-select.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/submodules-select.py similarity index 91% rename from frontends/pytorch/test/ivalue_import/submodules-select.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/submodules-select.py index 4236611e6..a39bf4125 100644 --- a/frontends/pytorch/test/ivalue_import/submodules-select.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/submodules-select.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/submodules.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/submodules.py similarity index 93% rename from frontends/pytorch/test/ivalue_import/submodules.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/submodules.py index 2b87d7228..202572058 100644 --- a/frontends/pytorch/test/ivalue_import/submodules.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/submodules.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/tensors.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/tensors.py similarity index 91% rename from frontends/pytorch/test/ivalue_import/tensors.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/tensors.py index 2ddefc0f7..43e014d48 100644 --- a/frontends/pytorch/test/ivalue_import/tensors.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/tensors.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/ivalue_import/tuple.py b/external/torch-mlir/TorchPlugin/test/ivalue_import/tuple.py similarity index 90% rename from frontends/pytorch/test/ivalue_import/tuple.py rename to external/torch-mlir/TorchPlugin/test/ivalue_import/tuple.py index 6a1511e9d..5cdef44fc 100644 --- a/frontends/pytorch/test/ivalue_import/tuple.py +++ b/external/torch-mlir/TorchPlugin/test/ivalue_import/tuple.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/external/torch-mlir/TorchPlugin/test/lit.cfg.py b/external/torch-mlir/TorchPlugin/test/lit.cfg.py new file mode 100644 index 000000000..3d8c7d88d --- /dev/null +++ b/external/torch-mlir/TorchPlugin/test/lit.cfg.py @@ -0,0 +1,74 @@ +# -*- Python -*- +# This file is licensed under a pytorch-style license +# See LICENSE for license information. + +import os +import platform +import re +import subprocess +import tempfile + +import lit.formats +import lit.util + +from lit.llvm import llvm_config +from lit.llvm.subst import ToolSubst +from lit.llvm.subst import FindTool + +# Configuration file for the 'lit' test runner. + +# name: The name of this test suite. 
+config.name = 'TORCH_MLIR_PLUGIN' + +config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) +if 'TEST_SRC_PATH' in os.environ: + config.environment['TEST_SRC_PATH'] = os.environ['TEST_SRC_PATH'] + +# path to our python operation library +config.environment['TEST_BUILD_PATH'] = os.path.join(config.torch_mlir_plugin_obj_root) + +# suffixes: A list of file extensions to treat as test files. +config.suffixes = ['.py'] + +# test_source_root: The root path where tests are located. +config.test_source_root = os.path.dirname(__file__) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join(config.torch_mlir_plugin_obj_root, 'test') + +config.substitutions.append(('%PATH%', config.environment['PATH'])) +config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) +config.substitutions.append(('%PYTHON', config.python_executable)) + +llvm_config.with_system_environment( + ['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP']) + +llvm_config.use_default_substitutions() + +# excludes: A list of directories to exclude from the testsuite. The 'Inputs' +# subdirectories contain auxiliary inputs for various tests in their parent +# directories. +config.excludes = ['lit.cfg.py', 'Inputs', 'Examples', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt'] + +# test_source_root: The root path where tests are located. +config.test_source_root = os.path.dirname(__file__) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join(config.torch_mlir_plugin_obj_root, 'test') +config.torch_mlir_tools_dir = os.path.join(config.torch_mlir_plugin_obj_root, 'bin') + +# Tweak the PATH to include the tools dir. +llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True) +llvm_config.with_environment('PYTHONPATH', [ + os.path.join(config.torch_mlir_python_packages_dir, 'torch_mlir'), + os.path.join(config.torch_mlir_python_packages_dir, 'torch_mlir_dialects'), + ], + append_path=True) + + +tool_dirs = [config.torch_mlir_tools_dir, config.llvm_tools_dir] +tools = [ + 'torch-mlir-opt', +] + +llvm_config.add_tool_substitutions(tools, tool_dirs) diff --git a/external/torch-mlir/TorchPlugin/test/lit.site.cfg.py.in b/external/torch-mlir/TorchPlugin/test/lit.site.cfg.py.in new file mode 100644 index 000000000..de29b4c57 --- /dev/null +++ b/external/torch-mlir/TorchPlugin/test/lit.site.cfg.py.in @@ -0,0 +1,56 @@ +# -*- Python -*- +# This file is licensed under a pytorch-style license +# See LICENSE for license information. + +@LIT_SITE_CFG_IN_HEADER@ + +import sys + +config.host_triple = "@LLVM_HOST_TRIPLE@" +config.target_triple = "@TARGET_TRIPLE@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.llvm_obj_root = "@LLVM_BINARY_DIR@" +# TODO: Fix tools dir to find FileCheck. 
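As the PYTHONPATH setup above shows, the plugin tests expect both the `torch_mlir` and `torch_mlir_dialects` packages from the build tree on the path. A sketch of running one test by hand outside of lit, with an illustrative build layout:

```shell
# Paths are illustrative; point them at your build's python_packages directory.
pkgs=/path/to/build/python_packages
export PYTHONPATH="$pkgs/torch_mlir:$pkgs/torch_mlir_dialects"
python external/torch-mlir/TorchPlugin/test/ivalue_import/functions.py | torch-mlir-opt
```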
+#config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" +config.llvm_tools_dir = "@LLVM_BINARY_DIR@/bin" +config.llvm_lib_dir = "@LLVM_LIBRARY_DIR@" +config.llvm_shlib_dir = "@SHLIBDIR@" +config.llvm_shlib_ext = "@SHLIBEXT@" +config.llvm_exe_ext = "@EXEEXT@" +config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@" +config.python_executable = "@Python3_EXECUTABLE@" +config.gold_executable = "@GOLD_EXECUTABLE@" +config.ld64_executable = "@LD64_EXECUTABLE@" +config.enable_shared = @ENABLE_SHARED@ +config.enable_assertions = @ENABLE_ASSERTIONS@ +config.targets_to_build = "@TARGETS_TO_BUILD@" +config.native_target = "@LLVM_NATIVE_ARCH@" +config.llvm_bindings = "@LLVM_BINDINGS@".split(' ') +config.host_os = "@HOST_OS@" +config.host_cc = "@HOST_CC@" +config.host_cxx = "@HOST_CXX@" +# Note: ldflags can contain double-quoted paths, so must use single quotes here. +config.host_ldflags = '@HOST_LDFLAGS@' +config.llvm_use_sanitizer = "@LLVM_USE_SANITIZER@" +config.llvm_host_triple = '@LLVM_HOST_TRIPLE@' +config.host_arch = "@HOST_ARCH@" +config.torch_mlir_plugin_src_root = "@CMAKE_SOURCE_DIR@" +config.torch_mlir_plugin_obj_root = "@CMAKE_BINARY_DIR@" +config.torch_mlir_python_packages_dir = "@TORCH_MLIR_PYTHON_PACKAGES_DIR@" + +# Support substitution of the tools_dir with user parameters. This is +# used when we can't determine the tool dir at configuration time. +try: + config.llvm_tools_dir = config.llvm_tools_dir % lit_config.params + config.llvm_shlib_dir = config.llvm_shlib_dir % lit_config.params +except KeyError: + e = sys.exc_info()[1] + key, = e.args + lit_config.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key,key)) + + +import lit.llvm +lit.llvm.initialize(lit_config, config) + +# Let the main config do the real work. +lit_config.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg.py") diff --git a/frontends/pytorch/test/node_import/README.md b/external/torch-mlir/TorchPlugin/test/node_import/README.md similarity index 100% rename from frontends/pytorch/test/node_import/README.md rename to external/torch-mlir/TorchPlugin/test/node_import/README.md diff --git a/frontends/pytorch/test/node_import/debug-info.py b/external/torch-mlir/TorchPlugin/test/node_import/debug-info.py similarity index 94% rename from frontends/pytorch/test/node_import/debug-info.py rename to external/torch-mlir/TorchPlugin/test/node_import/debug-info.py index 72826d323..a6fc7e9c6 100644 --- a/frontends/pytorch/test/node_import/debug-info.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/debug-info.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir diff --git a/frontends/pytorch/test/node_import/dict.py b/external/torch-mlir/TorchPlugin/test/node_import/dict.py similarity index 93% rename from frontends/pytorch/test/node_import/dict.py rename to external/torch-mlir/TorchPlugin/test/node_import/dict.py index 2fb84cf55..8b77ca918 100644 --- a/frontends/pytorch/test/node_import/dict.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/dict.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
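The lit.site.cfg.py.in fallback above substitutes `lit_config.params` into the tool paths; that only matters for multi-configuration generators, where the configured `llvm_tools_dir` can still contain a placeholder such as `%(build_mode)s`. In that case the missing value is supplied on the lit command line, for example:

```shell
# Only needed when the configured tool paths contain a %(build_mode)s style
# placeholder; the test-directory path is illustrative.
llvm-lit -sv /path/to/build/tools/torch-mlir/TorchPlugin/test --param=build_mode=Release
```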
import torch import torch_mlir import collections from typing import Tuple, Optional, List, NamedTuple, Dict -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/elif.py b/external/torch-mlir/TorchPlugin/test/node_import/elif.py similarity index 86% rename from frontends/pytorch/test/node_import/elif.py rename to external/torch-mlir/TorchPlugin/test/node_import/elif.py index 5d68d7e59..2146f0eb6 100644 --- a/frontends/pytorch/test/node_import/elif.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/elif.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/errors.py b/external/torch-mlir/TorchPlugin/test/node_import/errors.py similarity index 91% rename from frontends/pytorch/test/node_import/errors.py rename to external/torch-mlir/TorchPlugin/test/node_import/errors.py index 8de43be44..3b3ba77eb 100644 --- a/frontends/pytorch/test/node_import/errors.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/errors.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import enum diff --git a/frontends/pytorch/test/node_import/function-derefine.py b/external/torch-mlir/TorchPlugin/test/node_import/function-derefine.py similarity index 93% rename from frontends/pytorch/test/node_import/function-derefine.py rename to external/torch-mlir/TorchPlugin/test/node_import/function-derefine.py index b055bac89..826d31444 100644 --- a/frontends/pytorch/test/node_import/function-derefine.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/function-derefine.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir import typing -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/if.py b/external/torch-mlir/TorchPlugin/test/node_import/if.py similarity index 95% rename from frontends/pytorch/test/node_import/if.py rename to external/torch-mlir/TorchPlugin/test/node_import/if.py index 5b8c81f3e..cb2d5a141 100644 --- a/frontends/pytorch/test/node_import/if.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/if.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/list.py b/external/torch-mlir/TorchPlugin/test/node_import/list.py similarity index 86% rename from frontends/pytorch/test/node_import/list.py rename to external/torch-mlir/TorchPlugin/test/node_import/list.py index 9528be7ec..31aaf27b6 100644 --- a/frontends/pytorch/test/node_import/list.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/list.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/loop.py b/external/torch-mlir/TorchPlugin/test/node_import/loop.py similarity index 97% rename from frontends/pytorch/test/node_import/loop.py rename to external/torch-mlir/TorchPlugin/test/node_import/loop.py index 61b15bcc9..892420db4 100644 --- a/frontends/pytorch/test/node_import/loop.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/loop.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir import typing -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/prim.py b/external/torch-mlir/TorchPlugin/test/node_import/prim.py similarity index 98% rename from frontends/pytorch/test/node_import/prim.py rename to external/torch-mlir/TorchPlugin/test/node_import/prim.py index a60315926..459116060 100644 --- a/frontends/pytorch/test/node_import/prim.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/prim.py @@ -1,6 +1,6 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import typing @@ -9,7 +9,7 @@ import torch_mlir import typing -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/tuple.py b/external/torch-mlir/TorchPlugin/test/node_import/tuple.py similarity index 96% rename from frontends/pytorch/test/node_import/tuple.py rename to external/torch-mlir/TorchPlugin/test/node_import/tuple.py index 12753f9bc..8abecd893 100644 --- a/frontends/pytorch/test/node_import/tuple.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/tuple.py @@ -1,13 +1,13 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. 
import torch import torch_mlir import collections from typing import Tuple, Optional, NamedTuple -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() NT = NamedTuple('NT', [('f1', Optional[torch.Tensor]), diff --git a/frontends/pytorch/test/node_import/types-bool.py b/external/torch-mlir/TorchPlugin/test/node_import/types-bool.py similarity index 80% rename from frontends/pytorch/test/node_import/types-bool.py rename to external/torch-mlir/TorchPlugin/test/node_import/types-bool.py index 68f588157..53b4ef97f 100644 --- a/frontends/pytorch/test/node_import/types-bool.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/types-bool.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/frontends/pytorch/test/node_import/types-none.py b/external/torch-mlir/TorchPlugin/test/node_import/types-none.py similarity index 80% rename from frontends/pytorch/test/node_import/types-none.py rename to external/torch-mlir/TorchPlugin/test/node_import/types-none.py index 404e55a26..2b5fe3c27 100644 --- a/frontends/pytorch/test/node_import/types-none.py +++ b/external/torch-mlir/TorchPlugin/test/node_import/types-none.py @@ -1,11 +1,11 @@ # -*- Python -*- # This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. +# See LICENSE for license information. import torch import torch_mlir -# RUN: %PYTHON %s | npcomp-opt | FileCheck %s +# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s mb = torch_mlir.ModuleBuilder() diff --git a/external/torch-mlir/build_tools/build_standalone.sh b/external/torch-mlir/build_tools/build_standalone.sh new file mode 100755 index 000000000..bd6d1f535 --- /dev/null +++ b/external/torch-mlir/build_tools/build_standalone.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# Simple script that does a CMake configure of this project as an external +# LLVM project so it can be tested in isolation to larger assemblies. +# This is meant for CI's and project maintainers. + +set -eu -o errtrace + +project_dir="$(cd $(dirname $0)/.. && pwd)" +llvm_project_dir="$project_dir/../llvm-project" +build_dir="$project_dir/build" + +cmake -GNinja -B"$build_dir" "$llvm_project_dir/llvm" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_ENABLE_PROJECTS=mlir \ + -DLLVM_EXTERNAL_PROJECTS=torch-mlir \ + -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$project_dir" \ + -DMLIR_ENABLE_BINDINGS_PYTHON=ON + +cd "$build_dir" +ninja tools/torch-mlir/all check-torch-mlir check-torch-mlir-plugin diff --git a/external/torch-mlir/build_tools/update_torch_ods.sh b/external/torch-mlir/build_tools/update_torch_ods.sh new file mode 100755 index 000000000..8723d7fa9 --- /dev/null +++ b/external/torch-mlir/build_tools/update_torch_ods.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Updates auto-generated ODS files for the `torch` dialect. 
+set -e + +src_dir="$(realpath $(dirname $0)/..)" +build_dir="$(realpath "${TORCH_MLIR_BUILD_DIR:-$src_dir/build}")" +torch_ir_dir="${src_dir}/include/torch-mlir/Dialect/Torch/IR" +python_packages_dir="${build_dir}/tools/torch-mlir/python_packages" + +#ninja -C "${build_dir}" +PYTHONPATH="${python_packages_dir}/torch_mlir" python -m torch_mlir_utils.codegen.torch_ods_gen \ + --torch_ir_dir="${torch_ir_dir}" \ + --debug_registry_dump="${torch_ir_dir}/JITOperatorRegistryDump.txt" diff --git a/external/torch-mlir/include/torch-mlir-c/Dialects.h b/external/torch-mlir/include/torch-mlir-c/Dialects.h new file mode 100644 index 000000000..20ded3520 --- /dev/null +++ b/external/torch-mlir/include/torch-mlir-c/Dialects.h @@ -0,0 +1,25 @@ +/*===-- torch-mlir-c/Dialects.h - Dialect functions --------------*- C -*-===*\ +|* *| +|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *| +|* Exceptions. *| +|* See https://llvm.org/LICENSE.txt for license information. *| +|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef TORCHMLIR_C_DIALECTS_H +#define TORCHMLIR_C_DIALECTS_H + +#include "mlir-c/Registration.h" + +#ifdef __cplusplus +extern "C" { +#endif + +MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(Torch, torch); + +#ifdef __cplusplus +} +#endif + +#endif // TORCHMLIR_C_DIALECTS_H diff --git a/external/torch-mlir/lib/CAPI/CMakeLists.txt b/external/torch-mlir/lib/CAPI/CMakeLists.txt index 56fdcfe27..236a3d3dc 100644 --- a/external/torch-mlir/lib/CAPI/CMakeLists.txt +++ b/external/torch-mlir/lib/CAPI/CMakeLists.txt @@ -1,4 +1,5 @@ add_mlir_library(TorchMLIRCAPI + Dialects.cpp Registration.cpp TorchTypes.cpp diff --git a/lib/Typing/Analysis/CPA/Interfaces.cpp b/external/torch-mlir/lib/CAPI/Dialects.cpp similarity index 52% rename from lib/Typing/Analysis/CPA/Interfaces.cpp rename to external/torch-mlir/lib/CAPI/Dialects.cpp index 04d486a07..cd2f3ef43 100644 --- a/lib/Typing/Analysis/CPA/Interfaces.cpp +++ b/external/torch-mlir/lib/CAPI/Dialects.cpp @@ -1,4 +1,4 @@ -//===- Interfaces.cpp - Interfaces for IR types ---------------------------===// +//===- Dialects.cpp - C Interface for Dialects ----------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
@@ -6,9 +6,9 @@ // //===----------------------------------------------------------------------===// -#include "npcomp/Typing/Analysis/CPA/Interfaces.h" +#include "torch-mlir-c/Dialects.h" -using namespace mlir; +#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h" +#include "mlir/CAPI/Registration.h" -#include "npcomp/Typing/Analysis/CPA/OpInterfaces.cpp.inc" -#include "npcomp/Typing/Analysis/CPA/TypeInterfaces.cpp.inc" +MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(Torch, torch, mlir::torch::Torch::TorchDialect) diff --git a/external/torch-mlir/python/CMakeLists.txt b/external/torch-mlir/python/CMakeLists.txt new file mode 100644 index 000000000..852f8fe61 --- /dev/null +++ b/external/torch-mlir/python/CMakeLists.txt @@ -0,0 +1,65 @@ +include(AddMLIRPython) + +################################################################################ +# Sources +################################################################################ + +declare_mlir_python_sources(TorchMLIRPythonSources) +declare_mlir_python_sources(TorchMLIRPythonExtensions) + +declare_mlir_python_sources(TorchMLIRPythonSources.Dialects + ADD_TO_PARENT TorchMLIRPythonSources +) + +declare_mlir_dialect_python_bindings( + ADD_TO_PARENT TorchMLIRPythonSources.Dialects + ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/mlir" + TD_FILE dialects/TorchBinding.td + SOURCES dialects/torch.py + DIALECT_NAME torch +) + +################################################################################ +# Extensions +################################################################################ + +declare_mlir_python_extension(TorchMLIRPythonExtensions.Main + MODULE_NAME _torchMlir + ADD_TO_PARENT TorchMLIRPythonExtensions + SOURCES + ${CMAKE_CURRENT_SOURCE_DIR}/TorchMLIRModule.cpp + EMBED_CAPI_LINK_LIBS + TorchMLIRCAPI + PRIVATE_LINK_LIBS + LLVMSupport +) + +################################################################################ +# Generate packages and shared library +# Downstreams typically will not use these, but they are useful for local +# testing. +################################################################################ + +set(_source_components + # TODO: Core is now implicitly building/registering all dialects, increasing + # build burden by ~5x. Make it stop. + MLIRPythonSources.Core + TorchMLIRPythonSources + TorchMLIRPythonExtensions +) + +add_mlir_python_common_capi_library(TorchMLIRAggregateCAPI + INSTALL_COMPONENT TorchMLIRPythonModules + INSTALL_DESTINATION python_packages/torch_mlir_dialects/mlir/_mlir_libs + OUTPUT_DIRECTORY "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir_dialects/mlir/_mlir_libs" + RELATIVE_INSTALL_ROOT "../../../.." + DECLARED_SOURCES ${_source_components} +) + +add_mlir_python_modules(TorchMLIRPythonModules + ROOT_PREFIX "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir_dialects/mlir" + INSTALL_PREFIX "python_packages/torch_mlir_dialects/mlir" + DECLARED_SOURCES ${_source_components} + COMMON_CAPI_LINK_LIBS + TorchMLIRAggregateCAPI + ) diff --git a/external/torch-mlir/python/TorchMLIRModule.cpp b/external/torch-mlir/python/TorchMLIRModule.cpp new file mode 100644 index 000000000..6db194714 --- /dev/null +++ b/external/torch-mlir/python/TorchMLIRModule.cpp @@ -0,0 +1,29 @@ +//===-- TorchBind.td - Torch dialect bind ------------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
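Taken together, the DECLARE macro in Dialects.h and the DEFINE macro in Dialects.cpp above give C API clients a `mlirGetDialectHandle__torch__()` entry point. A minimal sketch of a plain C client registering and loading the dialect, assuming the stock mlir-c headers:

```c
// Illustrative client; only mlirGetDialectHandle__torch__ comes from the new
// torch-mlir-c/Dialects.h, the rest is the standard MLIR C API.
#include "mlir-c/IR.h"
#include "mlir-c/Registration.h"
#include "torch-mlir-c/Dialects.h"

int main(void) {
  MlirContext ctx = mlirContextCreate();
  MlirDialectHandle torchHandle = mlirGetDialectHandle__torch__();
  mlirDialectHandleRegisterDialect(torchHandle, ctx); // make the dialect known
  mlirDialectHandleLoadDialect(torchHandle, ctx);     // and load it eagerly
  mlirContextDestroy(ctx);
  return 0;
}
```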
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "torch-mlir-c/Dialects.h" +#include "mlir-c/Bindings/Python/Interop.h" +#include "mlir-c/Registration.h" +#include "mlir/Bindings/Python/PybindAdaptors.h" + +namespace py = pybind11; + +PYBIND11_MODULE(_torchMlir, m) { + m.doc() = "torch-mlir main python extension"; + + m.def( + "register_torch_dialect", + [](MlirContext context, bool load) { + MlirDialectHandle handle = mlirGetDialectHandle__torch__(); + mlirDialectHandleRegisterDialect(handle, context); + if (load) { + mlirDialectHandleLoadDialect(handle, context); + } + }, + py::arg("context"), py::arg("load") = true); +} diff --git a/python/npcomp/dialects/TorchBind.td b/external/torch-mlir/python/mlir/dialects/TorchBinding.td similarity index 70% rename from python/npcomp/dialects/TorchBind.td rename to external/torch-mlir/python/mlir/dialects/TorchBinding.td index ad048cbb4..058b75edd 100644 --- a/python/npcomp/dialects/TorchBind.td +++ b/external/torch-mlir/python/mlir/dialects/TorchBinding.td @@ -1,4 +1,4 @@ -//===-- TorchBind.td - Torch dialect bind ------------------*- tablegen -*-===// +//===-- TorchBinding.td - Torch dialect bindings -----------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,10 +6,10 @@ // //===----------------------------------------------------------------------===// -#ifndef NPCOMP_PYTHON_DIALECTS_TORCH_BIND -#define NPCOMP_PYTHON_DIALECTS_TORCH_BIND +#ifndef PYTHON_BINDINGS_TORCH_OPS +#define PYTHON_BINDINGS_TORCH_OPS include "mlir/Bindings/Python/Attributes.td" include "torch-mlir/Dialect/Torch/IR/TorchOps.td" -#endif +#endif // PYTHON_BINDINGS_TORCH_OPS diff --git a/python/npcomp/dialects/torch.py b/external/torch-mlir/python/mlir/dialects/torch.py similarity index 79% rename from python/npcomp/dialects/torch.py rename to external/torch-mlir/python/mlir/dialects/torch.py index 1963133a8..7f2174889 100644 --- a/python/npcomp/dialects/torch.py +++ b/external/torch-mlir/python/mlir/dialects/torch.py @@ -3,3 +3,4 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception from ._torch_ops_gen import * +from .._mlir_libs._torchMlir import register_torch_dialect diff --git a/external/torch-mlir/test/CMakeLists.txt b/external/torch-mlir/test/CMakeLists.txt index 868612ebf..cb3ded7bf 100644 --- a/external/torch-mlir/test/CMakeLists.txt +++ b/external/torch-mlir/test/CMakeLists.txt @@ -11,16 +11,9 @@ configure_lit_site_cfg( set(TORCH_MLIR_TEST_DEPENDS FileCheck count not - torch-mlir-opt + torch-mlir-opt TorchMLIRPythonModules ) -# XXX: Enable -#if(MLIR_ENABLE_BINDINGS_PYTHON) -# list(APPEND TORCH_MLIR_TEST_DEPENDS -# TorchMLIRPythonModules -# ) -#endif() - add_lit_testsuite(check-torch-mlir "Running the torch-mlir regression tests" ${CMAKE_CURRENT_BINARY_DIR} DEPENDS ${TORCH_MLIR_TEST_DEPENDS} diff --git a/external/torch-mlir/test/lit.cfg.py b/external/torch-mlir/test/lit.cfg.py index 148ecdf51..250c7e59c 100644 --- a/external/torch-mlir/test/lit.cfg.py +++ b/external/torch-mlir/test/lit.cfg.py @@ -65,7 +65,7 @@ llvm_config.add_tool_substitutions(tools, tool_dirs) if config.enable_bindings_python: llvm_config.with_environment('PYTHONPATH', [ - os.path.join(config.torch_mlir_obj_root, 'python_packages', - 'torch_mlir'), + os.path.join(config.torch_mlir_python_packages_dir, + 'torch_mlir_dialects'), ], append_path=True) diff --git 
a/external/torch-mlir/test/lit.site.cfg.py.in b/external/torch-mlir/test/lit.site.cfg.py.in index 914c96a34..8a274e33c 100644 --- a/external/torch-mlir/test/lit.site.cfg.py.in +++ b/external/torch-mlir/test/lit.site.cfg.py.in @@ -4,6 +4,7 @@ import sys config.enable_bindings_python = @MLIR_ENABLE_BINDINGS_PYTHON@ config.torch_mlir_obj_root = "@TORCH_MLIR_BINARY_DIR@" +config.torch_mlir_python_packages_dir = "@TORCH_MLIR_PYTHON_PACKAGES_DIR@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" config.llvm_obj_root = "@LLVM_BINARY_DIR@" config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" diff --git a/external/torch-mlir/test/python/smoketest.py b/external/torch-mlir/test/python/smoketest.py index f04e4af4a..130242562 100644 --- a/external/torch-mlir/test/python/smoketest.py +++ b/external/torch-mlir/test/python/smoketest.py @@ -1,9 +1,7 @@ # RUN: %PYTHON %s -# XXX: Fix this -# XFAIL: * import mlir.ir -from mlir.dialects import iree +from mlir.dialects import torch with mlir.ir.Context() as ctx: - iree.register_iree_dialect(ctx) + torch.register_torch_dialect(ctx) diff --git a/frontends/pytorch/CMakeLists.txt b/frontends/pytorch/CMakeLists.txt index ff29efea4..e7c8bae4c 100644 --- a/frontends/pytorch/CMakeLists.txt +++ b/frontends/pytorch/CMakeLists.txt @@ -21,17 +21,6 @@ project(npcomp_pytorch LANGUAGES CXX C) set(CMAKE_C_STANDARD 11) set(CMAKE_CXX_STANDARD 14) -#------------------------------------------------------------------------------- -# Setup PyTorch -#------------------------------------------------------------------------------- - -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") -include(NpcompPyTorch) -NpcompProbeForPyTorchInstall() -find_package(Torch 1.8 REQUIRED) - -NpcompConfigurePyTorch() - #------------------------------------------------------------------------------- # Output paths #------------------------------------------------------------------------------- @@ -44,6 +33,5 @@ endif() # Subdirectories #------------------------------------------------------------------------------- -add_subdirectory(csrc) add_subdirectory(python) add_subdirectory(test) diff --git a/frontends/pytorch/README.md b/frontends/pytorch/README.md deleted file mode 100644 index 0ceae5f3d..000000000 --- a/frontends/pytorch/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# NPComp - PyTorch frontend integration - -This directory contains optional components for interfacing PyTorch to NPComp. -Integration is targeted at multiple levels: - -* Via program capture with a ATen pseudo-device. -* Via IR-level integration with PyTorch (via tracing or scripting interfaces). -* Interfaces to facilitate checking against reference implementations and - verification. - -In all situations, the target dialects are maintained in the outer project, -along with their lowerings to common intermediate dialects and backends. This -directory should be purely about interfacing with the PyTorch/LibTorch -components for extracting and executing programs. - -The code in this directory is intended to integrate tightly with pytorch, and -follows the code style for pytorch. See the [overall documentation for -frontends](../README.md) for further details about code layout and integration -philosophy. In particular, this directory exists to provide a working -frontend to an MLIR based pytorch compilation flow and is not intended to be -contributed to the LLVM monorepo. 
If the project is successful, it makes more -sense to either break it out as an independent project that depends on -LLVM/MLIR/npcomp or contribute it upstream to PyTorch. However, as it will be -quite some time before the components are in a state to support such a -dependency, it is being carried in-tree in the interim. - -### Program capture with a ATen dispatch capture. - -Integration with a pseudo-device is typified by code like the following: - -``` -import torch -import torch_mlir - -lhs = torch.rand(2, 3) -rhs = torch.rand(3, 4) - -mb = torch_mlir.ModuleBuilder() -with mb.capture_function("mm", [lhs, rhs]) as f: - result = torch.mm(lhs, rhs) - f.returns([result]) - -mb.module.operation.print() -``` - -All operations that happen under the `mb.capture_function` context manager are -intercepted via PyTorch's -[dispatcher](http://blog.ezyang.com/2020/09/lets-talk-about-the-pytorch-dispatcher/), -and an IR graph is constructed into the module held by the ModuleBuilder. - -This technique has several advantages and disadvantages. For training use -cases, this technique generates a backward path automatically using the same -method that pytorch natively uses. The resulting graph also tends to be -simpler, since it will not reflect conditionals in the original python -code. Lastly, it is natural if MLIR is being used as a frontend target for an -actual device of some sort. In this case, the MLIR could go through a -device-specific lowering path and the resulting code run on a device. -The implementation of this technique is largely modeled after `pytorch/xla`. diff --git a/frontends/pytorch/docs/compatibility.md b/frontends/pytorch/docs/compatibility.md deleted file mode 100644 index 44c126976..000000000 --- a/frontends/pytorch/docs/compatibility.md +++ /dev/null @@ -1,20 +0,0 @@ -# Compatibility notes - -This document contains known compatibility issues with the PyTorch integration. -Some items may be permanent limitations and other may just be capturing -plans for future work. In general, this only applies to the default -configuration, not the "type dispatch" (PyTorch 1.4) integration. - - -## Pending Work Needed - -### Unpacking quantized weights - -Some of the torch::jit::Operator 's (especially the quantized:: ones) have already gone through some lowering steps. Specifically, the quantized::conv operators are pre-packed and stored as ConvPackedParam attributes on the Module instead of just passing the weight/bias tensors as SSA arguments to the Operator[0] [1]. How they are packed depends on whether the fbgemm or the XNNPACK backends are used... - -I think this comes back to the ability to pass a "CustomClass" as an SSA Value into an Operator, which may be difficult for us to lower to TCF... -Glow (and others) get around this by adding custom passes to convert the PackedParams to a traditional glow::unpacked_quantized_conv operation [2], but that adds some layers of lowering in TorchScript land before we would want to call off to get_registered_ops on the python side (may not be avoidable?) 
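
(Editor's aside, not part of the deleted notes above: the prepacked-weights behaviour described in that paragraph is easy to reproduce with stock PyTorch eager-mode quantization. The sketch below is illustrative only; the tiny module, the calibration input, and the choice of the fbgemm qconfig are assumptions, not anything prescribed by npcomp.)

```python
import torch
import torch.nn as nn

class TinyConv(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = torch.quantization.QuantStub()
        self.conv = nn.Conv2d(1, 1, kernel_size=1)
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.conv(self.quant(x)))

m = TinyConv().eval()
m.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(m, inplace=True)
m(torch.randn(1, 1, 4, 4))          # calibration pass
torch.quantization.convert(m, inplace=True)

# After convert(), the conv's weight/bias live inside a packed-params object
# held as a module attribute rather than as plain tensors.
print(torch.jit.script(m).inlined_graph)
```

Printing the inlined graph should show `quantized::conv2d` consuming that packed-params attribute instead of loose weight/bias SSA values, which is exactly the unpacking problem the note describes.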
- -[0]: https://github.com/pytorch/pytorch/blob/dc67b47bc9d53dbeb898a4d920b0225ac73629ec/aten/src/ATen/native/quantized/library.cpp#L63-L69 -[1]: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/quantized/cpu/conv_packed_params.h -[2]: https://github.com/pytorch/glow/blob/b62ec449c43b77722c119b53b3ea5aec9be3edb9/torch_glow/src/TorchGlowBackend.cpp#L98-L116 diff --git a/frontends/pytorch/docs/libtorch_roundtrip.md b/frontends/pytorch/docs/libtorch_roundtrip.md deleted file mode 100644 index e1244b7f4..000000000 --- a/frontends/pytorch/docs/libtorch_roundtrip.md +++ /dev/null @@ -1,150 +0,0 @@ -# Design sketch: libtorch code generation round-trip - -It has been brought up a couple of times that having a dynamic fallback to -libtorch for kernel calls that the compiler does not recognize could be -advantageous. This is a sketch of how such a facility could work. - -## Op background - -When programs are imported from Torch (either via acap/driver capture or -from TorchScript), kernel calls are mapped to a `torch.kernel_call` op, which -it is useful to visualize: - -```mlir -%0 = torch.kernel_call "aten::mm" %arg0, %arg1 : - (!numpy.ndarray<[2,3]:f32>, !numpy.ndarray<[3,4]:f32>) -> - !numpy.ndarray<[2,4]:f32> - { - sigArgTypes = ["Tensor", "Tensor"], - sigIsMutable = false, - sigIsVararg = false, - sigIsVarret = false, - sigRetTypes = ["Tensor"] - } -``` - -A couple of things to note at this level: - -* Tensor operands/results are all represented by mutable `ndarray` types. -* The kernel call name ("aten::mm" above) is the `c10::OperatorName`. -* `sigArgTypes` and `sigRetTypes` correspond to the rest of a signature. - Together with the kernel name, it is sufficient to find a precise `OpHandle` - that can be used for making calls. -* The `torch.kernel_call` implements the `TorchKernelOpInterface` which - provides structured access to this metadata. - -From here, one typically uses the pass `aten-recognize-kernels` to promote -`torch.kernel_call` ops that the compiler has concretely modeled into -corresponding `aten` dialect ops. Here is an example of a function containing -the above, with aten kernels recognized: - -```mlir - func @mm(%arg0: !numpy.ndarray<[2,3]:f32>, %arg1: !numpy.ndarray<[3,4]:f32>) -> !numpy.ndarray<[2,4]:f32> { - %0 = numpy.copy_to_tensor %arg0 : (!numpy.ndarray<[2,3]:f32>) -> tensor<2x3xf32> - %1 = numpy.copy_to_tensor %arg1 : (!numpy.ndarray<[3,4]:f32>) -> tensor<3x4xf32> - %2 = "aten.mm"(%0, %1) : (tensor<2x3xf32>, tensor<3x4xf32>) -> tensor<2x4xf32> - %3 = numpy.create_array_from_tensor %2 : (tensor<2x4xf32>) -> !numpy.ndarray<[2,4]:f32> - return %3 : !numpy.ndarray<[2,4]:f32> - } -``` - -A few things to note about this form: - -* These recognized kernels are generated from the `torch_signature_ods_gen.py` - script, which imposes some mapping policy on them. -* Most kernels are aggressively converted to operate on ssa tensor values via - `copy_to_tensor`/`create_array_from_tensor` ops, making the majority of ops - in the `aten` dialect which are purely functional operate just on value - semantic types. -* The metadata is stripped off of the originating `torch.kernel_call` but each - `aten` op implements `TorchKernelOpInterface`, giving it access to the kernel - name and a signature matching its operands/results of a Torch kernel that - implements the computation. -* There is some information loss here but there should be enough retained to - perform the correct calculation, if not execute it exactly as the original - program specified (i.e. 
`out=` and other "ergonomic" aliases will be - rewritten into dedicated stores, etc). - -## General fallback flow - -The most straight-forward way to execute a `torch.kernel_call` or `aten` op -supporting the `TorchKernelOpInterface` would be to rewrite it into code that -invokes the ATen boxed dispatch mechanism: - -* Looking up a corresponding kernel based on a signature known at compile time - (constructed from `TorchKernelOpInterface` metadata). -* For each operand, scribble into a `Stack` (of `IValue`) list. -* Invoking `c10::Dispatcher::callBoxed()` with the stack. -* Marshaling returned results back out of the return `Stack`. -* Performing error and type constraint checking. - -The "inside" of such a dispatch function would be somewhat "switchy" but is -not all that complicated. - -## Runtime library - -`libtorch` on its own is not particularly amenable to be invoked from such -a low level. It would be better if there were a shared library that provided -the above facility as simple C functions that the compiler could emit calls -to. It would then be trivial to load/link this shared library in for JIT'ing, -AOT compilation, etc. - -Example: - -```c -/// Looks up a Torch op given a signature. -void *refbackFindTorchOp(const char *signature); - -/// Creates a 'call' struct from an op returned by `refbackFindTorchOp`. -/// Must be de-allocated via refbackDestroyTorchCall() when done. -void *refbackCreateTorchCall(void *torchOp); - -/// Adds IValues to the call stack. -void refbackTorchCallAddTensor(void *call, void *data, int64_t *sizes, int rank); -void refbackTorchCallAddScalar(void *call, int64_t scalar); -// ... - -/// Invokes the kernel. -/// After invocation, results can be read out with below methods. -bool refbackTorchInvoke(void *call); - -/// Gets IValues from the result stack. -bool refbackTorchGetTensor(void *call, size_t index, void **data, int64_t **sizes, int *rank); -bool refbackTorchGetScalar(void *call, size_t index, int64_t *scalar); - -/// Frees any resources associated with the call. -void refbackTorchCallDestroy(void *call); -``` - -## Generating code - -A pass could be written to transform ops implementing `TorchKernelOpInterface` -into `llvm` calls into the above functions. Details will be a bit thick and -depend on precise representations, but it should be fully generic. It should -be possible to prototype the whole thing with nothing but command line tools -and the existing `torch_mlir` paths for extracting programs. - -## Code location recommendations: - -* C-runtime library: `frontends/pytorch/csrc/kernelcrt` -* Code generation pass: `include/npcomp/Dialects/Torch/Transforms/TorchKernelToLLVMPass.cpp` - -## Gotchas - -This facility should work well for Torch kernels that are wholly unknown to -the compiler. However, kernels that the compiler fails to lower completely (i.e. -due to some unsupported, and unknown at the outset dynamism) way end up as -`tcf` ops or others that cannot be natively lowered via the -`TorchKernelOpInterface` facility. We can deal with this phase ordering in -a couple of ways: - -* When converting into `tcf` be more precise about when certain dynamic - constructs are wholly unsupported. Not likely to scale really well unless - if just being used as a stop-gap. In that case, possibly having a pass - early that marks ops to not lower because we know we want to retain them - at the higher level may be fine. 
-* Treat `aten` as both a source and a target dialect for `tcf`: implement - lowerings *to* `aten` that run after the rest of `tcf` has been lowered. -* Implement `TorchKernelOpInterface` on the `tcf` ops (or have some other - interface for mapping them back). - diff --git a/frontends/pytorch/examples/torchscript_resnet18_e2e.py b/frontends/pytorch/examples/torchscript_resnet18_e2e.py index d319c801f..c764b1da2 100644 --- a/frontends/pytorch/examples/torchscript_resnet18_e2e.py +++ b/frontends/pytorch/examples/torchscript_resnet18_e2e.py @@ -12,7 +12,8 @@ import typing import torch_mlir import npcomp -from npcomp.compiler.pytorch.backend import refjit, frontend_lowering, iree +from npcomp.passmanager import PassManager +from npcomp.compiler.pytorch.backend import refjit, iree from npcomp.compiler.utils import logging mb = torch_mlir.ModuleBuilder() @@ -109,7 +110,8 @@ class_annotator.annotateArgs( mb.import_module(recursivescriptmodule._c, class_annotator) backend = refjit.RefjitNpcompBackend() -compiled = backend.compile(frontend_lowering.lower_object_graph(mb.module)) +PassManager.parse("torchscript-to-npcomp-backend-pipeline").run(mb.module) +compiled = backend.compile(mb.module) jit_module = backend.load(compiled) predictions(test_module.forward, jit_module.forward, img, labels) diff --git a/frontends/pytorch/python/CMakeLists.txt b/frontends/pytorch/python/CMakeLists.txt index 5c263dcd7..11152557b 100644 --- a/frontends/pytorch/python/CMakeLists.txt +++ b/frontends/pytorch/python/CMakeLists.txt @@ -3,5 +3,7 @@ # Collapse all local python sources to the project level python/ directory. ################################################################################ +include(NpcompPython) + npcomp_python_create_symlinks( ${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_torch ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/frontends/pytorch/python/torch_mlir_torchscript/annotations.py b/frontends/pytorch/python/torch_mlir_torchscript/annotations.py index f198aac23..1bd8f7a1e 100644 --- a/frontends/pytorch/python/torch_mlir_torchscript/annotations.py +++ b/frontends/pytorch/python/torch_mlir_torchscript/annotations.py @@ -20,9 +20,9 @@ import torch # Attribute names used for annotations. # These should be kept in sync with their use in -# `frontends/pytorch/python/torch_mlir/torchscript_annotations.py`. -NPCOMP_EXPORT_ATTR_NAME = '_npcomp_export' -NPCOMP_ARG_ANNOTATIONS_ATTR_NAME = '_npcomp_arg_annotations' +# `torch_mlir/torchscript_annotations.py`. +TORCH_MLIR_EXPORT_ATTR_NAME = '_torch_mlir_export' +TORCH_MLIR_ARG_ANNOTATIONS_ATTR_NAME = '_torch_mlir_arg_annotations' def export(fn): @@ -39,7 +39,7 @@ def export(fn): Conceptually, this decorator is annotating the scripted module, but is applied to the original `torch.nn.Module` for convenience. """ - setattr(fn, NPCOMP_EXPORT_ATTR_NAME, True) + setattr(fn, TORCH_MLIR_EXPORT_ATTR_NAME, True) return fn @@ -63,7 +63,7 @@ def annotate_args(annotations: List[Optional[ArgAnnotation]]): # TODO: Check the number of arguments matches the number of arg annotations. 
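    # (Editor's illustrative note, not present in the original file: a typical
    # use of `@export` and `@annotate_args` on a torch.nn.Module method is
    # sketched below. The (shape, dtype, has-value-semantics) tuple layout for
    # ArgAnnotation is an assumption inferred from the surrounding code and may
    # differ; `None` means "leave this argument unannotated", as for `self`.
    #
    #   class MyModule(torch.nn.Module):
    #       @export
    #       @annotate_args([
    #           None,                             # `self`
    #           ([-1, -1], torch.float32, True),  # arg0: dynamic 2-D float tensor
    #       ])
    #       def forward(self, x):
    #           return x + x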
def decorator(fn): - setattr(fn, NPCOMP_ARG_ANNOTATIONS_ATTR_NAME, annotations) + setattr(fn, TORCH_MLIR_ARG_ANNOTATIONS_ATTR_NAME, annotations) return fn return decorator @@ -91,10 +91,10 @@ def extract_serializable_annotations( continue export = None arg_annotations = None - if hasattr(method, NPCOMP_EXPORT_ATTR_NAME): - export = method._npcomp_export - if hasattr(method, NPCOMP_ARG_ANNOTATIONS_ATTR_NAME): - arg_annotations = method._npcomp_arg_annotations + if hasattr(method, TORCH_MLIR_EXPORT_ATTR_NAME): + export = method._torch_mlir_export + if hasattr(method, TORCH_MLIR_ARG_ANNOTATIONS_ATTR_NAME): + arg_annotations = method._torch_mlir_arg_annotations if export is not None and arg_annotations is not None: module_annotations.method_annotations.append( SerializableMethodAnnotation(method_name=method_name, diff --git a/frontends/pytorch/python/torch_mlir_torchscript_e2e_test_configs/npcomp_backend.py b/frontends/pytorch/python/torch_mlir_torchscript_e2e_test_configs/npcomp_backend.py index 8b9135269..21321c0c9 100644 --- a/frontends/pytorch/python/torch_mlir_torchscript_e2e_test_configs/npcomp_backend.py +++ b/frontends/pytorch/python/torch_mlir_torchscript_e2e_test_configs/npcomp_backend.py @@ -12,6 +12,7 @@ import numpy as np import torch import torch_mlir +import npcomp from npcomp.passmanager import PassManager from npcomp.compiler.pytorch.backend import refjit from npcomp.compiler.pytorch.backend.abc import NpcompBackend @@ -88,15 +89,22 @@ Diagnostics: finally: sys.stderr = sys.__stderr__ + # The torch-mlir python code is built against its own aggregate CAPI. + # The npcomp python module is built against our own. + # So we need to transport it across those as a string. + with npcomp.ir.Context() as ctx: + npcomp.register_all_dialects(ctx) + module = npcomp.ir.Module.parse(str(mb.module)) + try: sys.stderr = StringIO() - asm_for_error_report = mb.module.operation.get_asm( + asm_for_error_report = module.operation.get_asm( large_elements_limit=10, enable_debug_info=True) pipeline_str = "torchscript-to-npcomp-backend-pipeline" # Lower module in place to make it ready for compiler backends. - with mb.module.context: + with module.context: pm = PassManager.parse(pipeline_str) - pm.run(mb.module) + pm.run(module) except Exception as e: # TODO: More robust. # - don't arbitrarily clutter up /tmp. 
When a test suite has many @@ -119,11 +127,12 @@ $ npcomp-opt -{pipeline_str} {filename} """) from None finally: sys.stderr = sys.__stderr__ + try: sys.stderr = StringIO() - asm_for_error_report = mb.module.operation.get_asm( + asm_for_error_report = module.operation.get_asm( large_elements_limit=10, enable_debug_info=True) - return self.backend.compile(mb.module) + return self.backend.compile(module) except Exception as e: filename = os.path.join(tempfile.gettempdir(), scripted.original_name + '.mlir') diff --git a/frontends/pytorch/test/CMakeLists.txt b/frontends/pytorch/test/CMakeLists.txt index b0a7f948d..97eeb7469 100644 --- a/frontends/pytorch/test/CMakeLists.txt +++ b/frontends/pytorch/test/CMakeLists.txt @@ -1,7 +1,3 @@ -llvm_canonicalize_cmake_booleans( - NPCOMP_ENABLE_TORCH_TYPE_DISPATCH -) - configure_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py @@ -12,21 +8,16 @@ configure_lit_site_cfg( set(TEST_DEPENDS FileCheck count not npcomp-opt - NPCOMPTorchMLIRExt + TorchMLIRTorchPlugin + TorchMLIRPluginPythonModules + NPCOMPPythonModules ) - -if(NPCOMP_ENABLE_TORCH_TYPE_DISPATCH) - list(APPEND TEST_DEPENDS - aten_ops - ) -endif() - add_lit_testsuite(check-frontends-pytorch "Running the frontends-pytorch regression tests" ${CMAKE_CURRENT_BINARY_DIR} DEPENDS ${TEST_DEPENDS} ) set_target_properties(check-frontends-pytorch PROPERTIES FOLDER "Tests") -add_lit_testsuites(TORCH_MLIR ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${TEST_DEPENDS}) +add_lit_testsuites(FRONTENDS_PYTORCH ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${TEST_DEPENDS}) add_dependencies(check-npcomp-all check-frontends-pytorch) diff --git a/frontends/pytorch/test/ivalue_import/annotations/sugar.py b/frontends/pytorch/test/annotations-sugar.py similarity index 100% rename from frontends/pytorch/test/ivalue_import/annotations/sugar.py rename to frontends/pytorch/test/annotations-sugar.py diff --git a/frontends/pytorch/test/lit.cfg.py b/frontends/pytorch/test/lit.cfg.py index d4c28891b..8cf5f77e2 100644 --- a/frontends/pytorch/test/lit.cfg.py +++ b/frontends/pytorch/test/lit.cfg.py @@ -61,8 +61,11 @@ config.npcomp_tools_dir = os.path.join(config.npcomp_obj_root, 'bin') npcomp_python_dir = "python" if config.npcomp_built_standalone else "tools/npcomp/python" llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True) llvm_config.with_environment('PYTHONPATH', [ - os.path.join(config.npcomp_python_packages_dir, 'npcomp_core'), - os.path.join(config.npcomp_python_packages_dir, 'npcomp_torch')], + os.path.join(config.npcomp_python_packages_dir, 'npcomp_core'), + os.path.join(config.npcomp_python_packages_dir, 'npcomp_torch'), + os.path.join(config.torch_mlir_python_packages_dir, 'torch_mlir'), + os.path.join(config.torch_mlir_python_packages_dir, 'torch_mlir_dialects'), + ], append_path=True) diff --git a/frontends/pytorch/test/lit.site.cfg.py.in b/frontends/pytorch/test/lit.site.cfg.py.in index c1d214a1f..684dba38c 100644 --- a/frontends/pytorch/test/lit.site.cfg.py.in +++ b/frontends/pytorch/test/lit.site.cfg.py.in @@ -38,6 +38,8 @@ config.npcomp_src_root = "@CMAKE_SOURCE_DIR@" config.npcomp_obj_root = "@CMAKE_BINARY_DIR@" config.npcomp_built_standalone = bool("@NPCOMP_BUILT_STANDALONE@") config.npcomp_python_packages_dir = "@MLIR_NPCOMP_PYTHON_PACKAGES_DIR@" +config.torch_mlir_python_packages_dir = "@TORCH_MLIR_PYTHON_PACKAGES_DIR@" + # Support substitution of the tools_dir with user parameters. 
This is # used when we can't determine the tool dir at configuration time. diff --git a/frontends/pytorch/utils/gen_aten_dialect.py b/frontends/pytorch/utils/gen_aten_dialect.py deleted file mode 100644 index b5f15f500..000000000 --- a/frontends/pytorch/utils/gen_aten_dialect.py +++ /dev/null @@ -1,1261 +0,0 @@ -# -*- Python -*- -# This file is licensed under a pytorch-style license -# See frontends/pytorch/LICENSE for license information. - -# Structured similarly to code from git@github.com:pytorch/xla.git - -from __future__ import print_function - -import argparse -import collections -import lark -import os -import re -import string -import sys - -#### -# This file parses the C++ signatures exported by pytorch and generates -# appropriate MLIR operations in a tablegen file. It also generates some of -# the more boilerplate parts of the pytorch integration. This may need to be -# run if pytorch versions change. Primarily this reads information from -# pytorch through RegistrationDeclarations.h and Functions.h. It also allows -# some local overrides (specified in aten_mlir_type.h). -# It generates: aten_mlir_type_defaults.{.cpp,.h} and ATenOps.td, which will need -# to be moved to their appropriate places. - -# To run: -# python3 gen_aten_dialect.py --output_folder=. \ -# ../csrc/aten_mlir_type.h \ -# ${TORCH_INSTALL_PREFIX}/include/ATen/RegistrationDeclarations.h \ -# ${TORCH_INSTALL_PREFIX}/include/ATen/Functions.h - - -def namedtuple_with_defaults(typename, field_names, default_values=()): - ntuple = collections.namedtuple(typename, field_names) - ntuple.__new__.__defaults__ = (None,) * len(ntuple._fields) - if isinstance(default_values, collections.Mapping): - prototype = ntuple(**default_values) - else: - prototype = ntuple(*default_values) - ntuple.__new__.__defaults__ = tuple(prototype) - return ntuple - - -class ArgTemplate(string.Template): - idpattern = r'[a-z0-9_]+' - - -FuncDef = namedtuple_with_defaults('FuncDef', 'cpp_sig, aten_sig') - -FuncGen = namedtuple_with_defaults( - 'FuncGen', - 'tree, xtree, rwxtree, func, xfunc, code, sig, rwsig, cppsig, funsig, mapsig, aten_sig' -) - -FuncOpts = namedtuple_with_defaults( - 'FuncOpts', - 'ref_param, device_param, wparams, outfn_template, outfn_name, shape_check_indices' -) - -_GRAMMAR = r""" - start: type fnname "(" params ")" - rtype: "(" rparams ")" - | TNAME - rparams: rparam - | rparam "," rparams - rparam: type param_name - type: CONST? core_type refspec? - fnname: CNAME - refspec: REF - | PTR - core_type: template - | TNAME - template: TNAME "<" typelist ">" - typelist: type - | type "," typelist - REF: "&" - PTR: "*" - CONST: "const" - TNAME: /[a-zA-Z0-9_:]+/ - HEXNUMBER: /0x[0-9a-fA-F]+/ - params: param - | param "," params - param: type param_name param_defval? 
- param_name: CNAME - - param_defval: "=" init_value - init_value: "true" - | "false" - | "{}" - | NUMBER - | SIGNED_NUMBER - | HEXNUMBER - | ESCAPED_STRING - - %import common.CNAME -> CNAME - %import common.NUMBER -> NUMBER - %import common.SIGNED_NUMBER -> SIGNED_NUMBER - %import common.ESCAPED_STRING -> ESCAPED_STRING - %import common.WS - %ignore WS - """ - -_PARSER = lark.Lark(_GRAMMAR, parser='lalr', propagate_positions=True) - -_XPARSER = lark.Lark(_GRAMMAR, - parser='lalr', - propagate_positions=True, - keep_all_tokens=True) - -_TD_BLACKLIST = set([ - 'clone', - 'to', - 'copy_', - 'copy', - 'copy_from', - '_copy_from', - '_unsafe_view', -]) - -_TD_NO_OPSTATS_LIST = set([ - '_log_softmax', - '_log_softmax_backward_data', -]) - -_FN_BLACKLIST = set([ - 'numel', - 'ones', - 'ones_like', - 'result_type', - # 'zero_', - 'zeros', - 'zeros_like', -]) - -_FN_NO_DEBUG_ENTRY_LIST = set([ - 'empty', - 'fill_', - 'zero_', -]) - -_FN_BLACKLIST_REGEX = [ - # ATEN functions - r'[^(]*cudnn', - # XLA/TPU functions -] - -_FN_OUT = { - 'add_out': - FuncOpts(), - 'arange_out(Tensor, Scalar, Scalar, Scalar) -> Tensor': - FuncOpts(outfn_template=ArgTemplate( - 'ATenMLIRType::arange($1, $2, $3, $0.options())')), - 'bitwise_not_out': - FuncOpts(), - 'clamp_out': - FuncOpts(), - 'div_out': - FuncOpts(), - 'gather_out': - FuncOpts(), - 'kthvalue_out': - FuncOpts(), - 'index_select_out': - FuncOpts(), - 'log_out': - FuncOpts(), - 'topk_out': - FuncOpts(), -} -_FN_OUT = {} - -# List of tuples with the regex match first, and the corresponding FuncOpts() -# second. -_FN_OUT_REGEX = [] - -_FN_REMAP = { - '_th_eq(Tensor, Scalar) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::eq'), - '_th_eq(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::eq'), - '_th_ge(Tensor, Scalar) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::ge'), - '_th_ge(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::ge'), - '_th_gt(Tensor, Scalar) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::gt'), - '_th_gt(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::gt'), - '_th_le(Tensor, Scalar) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::le'), - '_th_le(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::le'), - '_th_lt(Tensor, Scalar) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::lt'), - '_th_lt(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::lt'), - '_th_ne(Tensor, Scalar) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::ne'), - '_th_ne(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::ne'), - 's__th_and(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::__and__', - shape_check_indices=((0, 1),)), - 's__th_or(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::__or__', - shape_check_indices=((0, 1),)), - 's__th_xor(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::__xor__', - shape_check_indices=((0, 1),)), - # '_s_where(Tensor, Tensor, Tensor) -> Tensor': - # FuncOpts( - # outfn_name='ATenMLIRType::where', - # shape_check_indices=( - # (0, 1), - # (0, 2), - # )), - 's__th_eq(Tensor, Tensor) -> Tensor': - FuncOpts(outfn_name='ATenMLIRType::eq', shape_check_indices=((0, 1),)), -} - -_TYPE_NSMAP = { - 'Tensor': 'at::Tensor', - 'TensorList': 'at::TensorList', - 'Scalar': 'at::Scalar', - 'Storage': 'at::Storage', - 'IntList': 'at::IntList', - 'IntArrayRef': 'at::IntArrayRef', - 'Generator': 'at::Generator', - 'ScalarType': 'at::ScalarType', - 'TensorOptions': 'at::TensorOptions', - 'SparseTensorRef': 
'at::SparseTensorRef', - 'Device': 'c10::Device', - 'optional': 'c10::optional', - 'MemoryFormat': 'at::MemoryFormat', - 'QScheme': 'at::QScheme', - 'ConstQuantizerPtr': 'at::ConstQuantizerPtr', - 'Dimname': 'at::Dimname', # namedtensor-only - 'DimnameList': 'at::DimnameList', # namedtensor-only -} - -_H_HEADER = """// Autogenerated file by {gen}. Do not edit directly! - -#include - -namespace torch_mlir {{ - -class ATenMLIRTypeDefault {{ - public: -{hfuncs} -}}; - -void RegisterAtenTypeFunctions(); - -}} // namespace torch_mlir -""" - -_CPP_HEADER = """// Autogenerated file by {gen}. Do not edit directly! -#include "aten_mlir_type_default.h" - -#include -#include -#include -#include - -#include "aten_mlir_bridge.h" -#include "aten_mlir_type.h" - -namespace torch_mlir {{ - -{funcs} - -{regs} -}} // namespace torch_mlir -""" - -_torch_mlir_FUNCTIONS = {} - -_CTOR_FUNCTIONS = { - 'empty': '.device(at::DeviceType::CPU)', - 'linspace': '.device(at::DeviceType::CPU)', - 'logspace': '.device(at::DeviceType::CPU)', - 'rand': '.device(at::DeviceType::CPU)', - 'rand_like': '.device(at::DeviceType::CPU)', - 'randn': '.device(at::DeviceType::CPU)', - 'randn_like': '.device(at::DeviceType::CPU)', - 'randint': '.device(at::DeviceType::CPU)', - 'randint_like': '.device(at::DeviceType::CPU)', - 'randperm': '.device(at::DeviceType::CPU)', - 'scalar_tensor': '.device(at::DeviceType::CPU)', -} - -_FUNCTION_OPTIONS = { - 'slice(Tensor, int64_t, int64_t, int64_t, int64_t) -> Tensor': - FuncOpts(wparams=['self']), -} - -_RESULT_NAME = 'x_result' - - -class Context(object): - - def __init__(self, functions): - with open(functions, 'r') as ff: - self.functions_data = ff.read() - - def get_function(self, name): - if self.functions_data.find(' {}('.format(name)) >= 0: - return 'at::{}'.format(name) - - -class StringEmit(object): - - def __init__(self, sref): - self.sref = sref - self.sval = '' - self.pos = -1 - - def __repr__(self): - return self.sval - - def advance(self, t): - start = t.column - 1 - end = t.end_column - 1 - pos = self.pos if self.pos >= 0 else start - if start > pos: - self.sval += self.sref[pos:start] - self.sval += t.value - self.pos = end - - def skip(self, t): - self.pos = last_match(t) if self.pos >= 0 else -1 - - def append(self, s): - self.sval += s - self.pos = -1 - - -class TensorFetcher(object): - - def __init__(self, var_name): - self.var_name = var_name - self.tvar_name = '{}_tensors'.format(self.var_name) - self.tensors = [] - self.writeable = [] - - def add(self, name, writeable): - if writeable: - self.writeable.append(len(self.tensors)) - self.tensors.append(name) - return '{}[{}]'.format(self.var_name, len(self.tensors) - 1) - - def generate_fetches(self): - code = '' - code += ' std::vector {} = {{{}}};\n'.format( - self.tvar_name, ', '.join(self.tensors)) - code += (' auto {} = bridge::MLIRCreateTensorList({});\n').format( - self.var_name, self.tvar_name) - return code - - def generate_updates(self): - assert (0) - code = '' - if self.writeable: - ivar_name = '{}_update_indices'.format(self.var_name) - code += ' std::vector {} = {{{}}};\n'.format( - ivar_name, ', '.join(str(x) for x in self.writeable)) - code += ' bridge::XlaUpdateTensors({}, {}, {});\n'.format( - self.tvar_name, self.var_name, ivar_name) - return code - - -def list_get(l, n): - return l[n] if n < len(l) else None - - -def is_blacklisted_fn(fname, mapsig): - if fname in _FN_BLACKLIST or mapsig in _FN_BLACKLIST: - return True - for frx in _FN_BLACKLIST_REGEX: - if re.match(frx, fname) or re.match(frx, mapsig): 
- return True - return False - - -def get_outfn_options(fname, mapsig): - for name in [fname, mapsig]: - fnopts = _FN_OUT.get(name, None) - if fnopts is not None: - return fnopts - for frx, fnopts in _FN_OUT_REGEX: - if re.match(frx, fname) or re.match(frx, mapsig): - return fnopts - - -def get_remapfn_options(fname, mapsig): - for name in [fname, mapsig]: - fnopts = _FN_REMAP.get(name, None) - if fnopts is not None: - return fnopts - - -def is_write_param(fnopts, pname, defval): - if fnopts and fnopts.wparams: - if pname in fnopts.wparams: - return True - return defval - - -def first_match(t): - if isinstance(t, lark.lexer.Token): - return t.column - 1 - assert isinstance(t, lark.tree.Tree) - return first_match(t.children[0]) - - -def last_match(t): - if isinstance(t, lark.lexer.Token): - return t.end_column - 1 - assert isinstance(t, lark.tree.Tree) - return last_match(t.children[-1]) - - -def for_every_token(t, fn): - if isinstance(t, lark.lexer.Token): - fn(t) - else: - assert isinstance(t, lark.tree.Tree) - for c in t.children: - for_every_token(c, fn) - - -def emit_string(t, emit, emit_fn): - status = emit_fn(t) - if status > 0: - - def do_emit(tok): - emit.advance(tok) - - for_every_token(t, do_emit) - elif status == 0: - if isinstance(t, lark.lexer.Token): - emit.advance(t) - else: - assert isinstance(t, lark.tree.Tree) - for c in t.children: - emit_string(c, emit, emit_fn) - else: - emit.skip(t) - - -def typed_child(t, n, ttype): - assert isinstance(t, lark.tree.Tree) - assert n < len(t.children) - c = t.children[n] - assert isinstance(c, lark.tree.Tree) - assert c.data == ttype, t.pretty() - return c - - -def rewrite_sig(tree, orig_sig, emit_fn=lambda x: 0): - emit = StringEmit(orig_sig) - emit_string(tree, emit, emit_fn) - return str(emit) - - -def rewrite_signature(sig, tmap): - - def rewrite(t): - if t.type == 'TNAME': - new_type = tmap.get(t.value, None) - if new_type is not None: - t.value = new_type - - def emit_fn(t): - if isinstance(t, lark.lexer.Token): - return 0 - return -1 if t.data == 'param_defval' else 0 - - xtree = _XPARSER.parse(sig) - for_every_token(xtree, rewrite) - return rewrite_sig(xtree, sig, emit_fn=emit_fn) - - -def create_stdfunc_sig(tree, orig_sig): - - def emit_fn(t): - if isinstance(t, lark.lexer.Token): - return 0 - return -1 if t.data == 'param_name' else 0 - - emit = StringEmit(orig_sig) - # Emit full function return type. - emit_string(typed_child(tree, 0, 'type'), emit, emit_fn) - emit.append('(') - # Emit parameter list w/out parameter names. - emit_string(typed_child(tree, 3, 'params'), emit, emit_fn) - emit.append(')') - return str(emit) - - -def create_map_sig(tree, orig_sig): - - def emit_fn(t): - if isinstance(t, lark.lexer.Token): - return -1 if t.type in ['CONST', 'REF', 'PTR'] else 0 - return -1 if t.data in ['param_name', 'param_defval'] else 0 - - emit = StringEmit(orig_sig) - # Emit full function return type. - emit_string(typed_child(tree, 1, 'fnname'), emit, emit_fn) - emit.append('(') - # Emit parameter list w/out parameter names. 
- emit_string(typed_child(tree, 3, 'params'), emit, emit_fn) - emit.append(') -> ') - emit_string(typed_child(tree, 0, 'type'), emit, emit_fn) - return str(emit) - - -def type_core(t): - assert isinstance(t, lark.tree.Tree) - for c in t.children: - if isinstance(c, lark.tree.Tree) and c.data == 'core_type': - c = c.children[0] - if isinstance(c, lark.lexer.Token): - return c.value - assert isinstance(c, lark.tree.Tree) and c.data == 'template' - return c.children[0].value - raise RuntimeError('Not a type tree: {}'.format(t)) - - -def type_is_const(t): - assert isinstance(t, lark.tree.Tree) - c = t.children[0] - return isinstance(c, lark.lexer.Token) and c.value == 'const' - - -def type_is_refptr(t, kind): - assert isinstance(t, lark.tree.Tree) - c = t.children[-1] - if not isinstance(c, lark.tree.Tree) or c.data != 'refspec': - return False - c = c.children[0] - return isinstance(c, lark.lexer.Token) and c.value == kind - - -def extract_list(t, l): - assert isinstance(t, lark.tree.Tree) - l.append(t.children[0]) - if len(t.children) == 2: - c = t.children[1] - if isinstance(c, lark.tree.Tree) and c.data == t.data: - extract_list(c, l) - return l - - -def tuple_type_list(t): - assert isinstance(t, lark.tree.Tree) - c = t.children[0] - assert isinstance(c, lark.tree.Tree) and c.data == 'core_type' - c = c.children[0] - assert isinstance(c, lark.tree.Tree) and c.data == 'template' - types = [] - return extract_list(c.children[1], types) - - -def get_function_name(t): - assert isinstance(t, lark.tree.Tree) - fname = t.children[1] - assert isinstance(fname, lark.tree.Tree) - assert fname.data == 'fnname' - return fname.children[0].value - - -def get_function_signature(t, orig_sig, namefn): - emit = StringEmit(orig_sig) - # Emit full function return type. - emit_string(typed_child(t, 0, 'type'), emit, lambda t: 0) - fnname = typed_child(t, 1, 'fnname').children[0] - xfname = namefn(fnname.value) - emit.append(' {}('.format(xfname)) - # Emit parameter list w/out parameter names. - emit_string(typed_child(t, 3, 'params'), emit, lambda t: 0) - emit.append(')') - return str(emit), fnname.value, xfname - - -def get_parameters(t): - assert isinstance(t, lark.tree.Tree) - c = t.children[2] - assert isinstance(c, lark.tree.Tree) - assert c.data == 'params' - params = [] - extract_list(c, params) - return params - - -def get_rparameters(t): - assert isinstance(t, lark.tree.Tree) - params = [] - print(len(t.children)) - # c = t.children[3] - # assert isinstance(c, lark.tree.Tree) - # assert c.data == 'rparams' - - # extract_list(c, params) - return params - - -def param_name(t): - assert isinstance(t, lark.tree.Tree) - c = t.children[1] - assert isinstance(c, lark.tree.Tree) - assert c.data == 'param_name' - token = c.children[0] - assert isinstance(token, lark.lexer.Token) - return token.value - - -def param_type(t): - assert isinstance(t, lark.tree.Tree) - c = t.children[0] - assert isinstance(c, lark.tree.Tree) - return c - - -def get_optional(fnopts, name, defval=None): - if fnopts is None or not hasattr(fnopts, name): - return defval - return getattr(fnopts, name, defval) or defval - - -def get_return_value(rtype, rname, param, var, ref_param, fnopts): - crtype = type_core(rtype) - if type_is_const(rtype) or type_is_refptr(rtype, '&'): - # If the return type is a const or a reference, return the matching - # parameter. In these cases we operated on XLA tensors data (the ATEN one), - # but the returned references are the input parameters. 
- assert param - return param_name(param) - elif crtype != 'Tensor': - return rname - else: - # If instead the return type is a value Tensor, we create a new one by - # wrapping the proper local variable which has been created by calling - # into the CPU tensor implementation. - return 'bridge::CreateMLIRTensor({}, bridge::GetMLIRDevice({}))'.format( - rname, get_optional(fnopts, 'device_param', param_name(ref_param))) - - -def get_reference_param(params, fnopts=None): - # The reference parameter is the Tensor object which we use to extract the - # result Tensor device, if any. - ref_param = None - other = None - for p in params: - ptype = param_type(p) - cptype = type_core(ptype) - pname = param_name(p) - if get_optional(fnopts, 'ref_param') == pname: - return p - if not other and (cptype == 'TensorOptions' or cptype == 'TensorList'): - other = p - if cptype != 'Tensor': - continue - if not ref_param and (pname == 'self' or type_is_const(ptype)): - ref_param = p - other = p - return ref_param or other - - -def get_tuple_return(rtype, rtype_str, rname, params, param_vars, ref_param, - fnopts): - types = tuple_type_list(rtype) - retstr = '{}('.format(rtype_str) - for i, ttype in enumerate(types): - if i > 0: - retstr += ', ' - tuple_var = 'std::get<{}>({})'.format(i, rname) - retstr += get_return_value(ttype, tuple_var, list_get(params, i), - list_get(param_vars, i), ref_param, fnopts) - return retstr + ')' - - -def get_return_type_str(t, orig_sig): - assert isinstance(t, lark.tree.Tree) - fname = t.children[1] - assert isinstance(fname, lark.tree.Tree) - assert fname.data == 'fnname' - token = fname.children[0] - assert isinstance(token, lark.lexer.Token) - return orig_sig[0:token.column - 2] - - -def generate_entry_debug_code(t, fname, params, fname_ns='aten'): - code = '' - if fname in _FN_NO_DEBUG_ENTRY_LIST: - return code - code += ' std::cout << "{}::{}" << std::endl;\n'.format(fname_ns, fname) - # Emits debug code for a given intercepted ATEN type function. For now we use - # a counter which will show up in the metrics reports. - # VLOG info. 
Use the following to see debug output: - # export TF_CPP_VMODULE=aten_mlir_type_default=3 - #code += ' TF_VLOG(3) << "XLA {} :"'.format(fname) - #for p in params: - # ptype = param_type(p) - # cptype = type_core(ptype) - # pname = param_name(p) - # if cptype == 'Tensor': - # code += ' << " {}=" << {}.toString()'.format(pname, pname) - #code += ';\n' - return code - - -def generate_exit_debug_code(t, fname, rname, params, param_vars): - code = '' - return code - - -def generate_return_stmt(t, rtype_str, fname, rname, params, param_vars, - ref_param, fnopts): - assert isinstance(t, lark.tree.Tree) - rtype = t.children[0] - ctype = type_core(rtype) - if ctype == 'std::tuple': - retstr = get_tuple_return(rtype, rtype_str, rname, params, param_vars, - ref_param, fnopts) - elif ctype == 'std::vector': - #retstr = 'bridge::CreateXlaTensors({}, bridge::GetXlaDevice({}))'.format( - # rname, get_optional(fnopts, 'device_param', param_name(ref_param))) - retstr = rname - elif ctype == 'Tensor': - retstr = get_return_value(rtype, rname, params[0], param_vars[0], ref_param, - fnopts) - elif ctype == 'void' and not type_is_refptr(rtype, '*'): - return '' - else: - retstr = rname - return ' return {};\n'.format(retstr) - - -def generate_result_assignment(t, rname): - assert isinstance(t, lark.tree.Tree) - rtype = t.children[0] - ctype = type_core(rtype) - if ctype == 'void' and not type_is_refptr(rtype, '*'): - return '' - return 'auto&& {} = '.format(rname) - - -def get_handling_function(ctx, fname, the_ref_param, param_vars): - function = _torch_mlir_FUNCTIONS.get(fname, None) or ctx.get_function(fname) - if function: - code = '{}({})'.format(function, ', '.join(param_vars)) - else: - other_params = list(param_vars) - other_params.remove(the_ref_param) - code = '{}.{}({})'.format(the_ref_param, fname, ', '.join(other_params)) - return code - - -def rewrite_tensor_options(fname, pname): - rw = _CTOR_FUNCTIONS.get(fname, None) - if rw is None: - return '', pname - xname = 'o_{}'.format(pname) - code = ' at::TensorOptions {} = {}{};\n'.format(xname, pname, rw) - return code, xname - - -def get_param_names(params): - param_vars = [] - for p in params: - pname = param_name(p) - param_vars.append(pname) - return param_vars - - -def expand_fn_template(tmpl, param_vars): - mdict = {} - for i, pname in enumerate(param_vars): - mdict[str(i)] = pname - return tmpl.substitute(mdict) - - -def create_call(fname, param_vars): - return '{}({})'.format(fname, ', '.join(param_vars)) - - -def generate_shape_checks(param_vars, shape_check_indices, fname): - code = '' - #for i, j in shape_check_indices: - # code += (' XLA_CHECK({}.sizes() == {}.sizes()) << "Operand shapes must be ' - # 'identical for {}, mismatch for arguments {} and {}";\n').format( - # param_vars[i], param_vars[j], fname, i + 1, j + 1) - return code - - -def generate_aten_remap(ctx, fname, sig, params, fnopts): - code = '{} {{\n'.format(sig) - - param_vars = get_param_names(params) - if fnopts.outfn_template is not None: - fcall = expand_fn_template(fnopts.outfn_template, param_vars) - else: - assert fnopts.outfn_name - fcall = create_call(fnopts.outfn_name, param_vars) - - if fnopts.shape_check_indices is not None: - code += generate_shape_checks(param_vars, fnopts.shape_check_indices, fname) - code += ' return {};\n'.format(fcall) - code += '}' - return code - - -def generate_outfn_result_copy(dest, src): - return ' {}.unsafeGetTensorImpl()->shallow_copy_from({}.getIntrusivePtr());\n'.format( - dest, src) - - -def generate_aten_out(ctx, tree, rwxtree, 
fname, sig, rwsig, params, fnopts): - rtype = tree.children[0] - num_outputs = None - if type_core(rtype) == 'std::tuple': - num_outputs = len(tuple_type_list(rtype)) - - code = '{} {{\n'.format(sig) - code += generate_entry_debug_code(tree, fname, params) - - param_vars = get_param_names(params) - if fnopts.outfn_template is not None: - fcall = expand_fn_template(fnopts.outfn_template, param_vars) - else: - m = re.match(r'(.*)_out$', fname) - assert m is not None, fname - out_count = num_outputs if num_outputs is not None else 1 - fcall = create_call('ATenMLIRType::{}'.format(m.group(1)), - param_vars[out_count:]) - - tmp_result = '{}_tmp'.format(fname) - code += ' auto {} = {};\n'.format(tmp_result, fcall) - if num_outputs is None: - code += generate_outfn_result_copy(param_vars[0], tmp_result) - code += generate_exit_debug_code(tree, fname, param_vars[0], params, - param_vars) - code += ' return {};\n'.format(param_vars[0]) - else: - for i in range(0, num_outputs): - code += generate_outfn_result_copy( - param_vars[i], 'std::get<{}>({})'.format(i, tmp_result)) - code += generate_exit_debug_code(tree, fname, param_vars[0:num_outputs], - params, param_vars) - code += ' return {}('.format(get_return_type_str(rwxtree, rwsig)) - for i in range(0, num_outputs): - if i > 0: - code += ', ' - code += param_vars[i] - code += ');\n' - code += '}' - return code - - -def generate_aten_to_mlir(ctx, tree, rwxtree, fname, sig, rwsig, params, - fnopts): - ref_param = get_reference_param(params, fnopts=fnopts) - - code = '{} {{\n'.format(sig) - code += generate_entry_debug_code(tree, fname, params) - the_ref_param = param_name(ref_param) if ref_param else None - tfetcher = TensorFetcher('mlirtens') - param_vars = [] - for p in params: - ptype = param_type(p) - cptype = type_core(ptype) - pname = param_name(p) - if cptype == 'TensorList': - #xname = 'l_{}'.format(pname) - #code += (' auto {} = bridge::XlaCreateTensorList({});\n').format( - # xname, pname) - xname = pname - param_vars.append(xname) - elif cptype == 'TensorOptions': - gcode, xname = rewrite_tensor_options(fname, pname) - code += gcode - param_vars.append(xname) - elif cptype != 'Tensor': - param_vars.append(pname) - elif type_is_const(ptype): - xname = tfetcher.add(pname, is_write_param(fnopts, pname, False)) - param_vars.append(xname) - else: - xname = tfetcher.add(pname, is_write_param(fnopts, pname, True)) - param_vars.append(xname) - if p == ref_param and not get_optional(fnopts, 'ref_param'): - the_ref_param = param_vars[-1] - code += tfetcher.generate_fetches() - result_assign = generate_result_assignment(tree, _RESULT_NAME) - code += ' {}{};\n'.format( - result_assign, get_handling_function(ctx, fname, the_ref_param, - param_vars)) - #code += tfetcher.generate_updates() - if result_assign: - code += (' static_cast({}); // Avoid warnings in case not ' - 'used\n'.format(_RESULT_NAME)) - code += generate_exit_debug_code(tree, fname, - _RESULT_NAME if result_assign else None, - params, param_vars) - code += generate_return_stmt(tree, get_return_type_str(rwxtree, rwsig), fname, - _RESULT_NAME if result_assign else None, params, - param_vars, ref_param, fnopts) - code += '}' - return code - - -def get_mlir_wrapper(fndef, ctx): - tree = _PARSER.parse(fndef.cpp_sig) - xtree = _XPARSER.parse(fndef.cpp_sig) - mapsig = create_map_sig(xtree, fndef.cpp_sig) - rwsig = rewrite_signature(fndef.cpp_sig, _TYPE_NSMAP) - rwxtree = _XPARSER.parse(rwsig) - params = get_parameters(tree) - fnopts = _FUNCTION_OPTIONS.get(mapsig, None) - - def gen_fnname(x): 
- return 'ATenMLIRTypeDefault::{}'.format(x) - - sig, fname, xfname = get_function_signature(rwxtree, rwsig, gen_fnname) - if not is_blacklisted_fn(fname, mapsig): - ofnopts = get_outfn_options(fname, mapsig) - rfnopts = get_remapfn_options(fname, mapsig) - if ofnopts is not None: - #print ("gen_aten_out:", fname) - code = generate_aten_out(ctx, tree, rwxtree, fname, sig, rwsig, params, - ofnopts) - elif rfnopts is not None: - #print ("gen_aten_remap", fname) - code = generate_aten_remap(ctx, fname, sig, params, rfnopts) - else: - code = generate_aten_to_mlir(ctx, tree, rwxtree, fname, sig, rwsig, - params, fnopts) - else: - code = None - return FuncGen(tree=tree, - xtree=xtree, - rwxtree=rwxtree, - func=fname, - xfunc=xfname, - code=code, - sig=fndef.cpp_sig, - rwsig=rwsig, - cppsig=sig, - mapsig=mapsig, - funsig=create_stdfunc_sig(rwxtree, rwsig), - aten_sig=fndef.aten_sig) - - -def is_tensor_api(fndef): - fndef = fndef.replace('at::', '') - fndef = fndef.replace('c10::Device', 'Device') - m = re.search(r'\bTensor\b', fndef) - return m is not None, fndef - - -def extract_functions(path): - functions = [] - errors = [] - for line in open(path, 'r'): - m = re.match(r'\s*([^\s].*); //\s+(.*)', line) - if not m: - continue - fndef = m.group(1) - try: - _XPARSER.parse(fndef) - functions.append(FuncDef(cpp_sig=fndef, aten_sig=m.group(2))) - except Exception as e: - if is_tensor_api(fndef)[0]: - errors.append((fndef, str(e))) - print('Error parsing "{}": {}'.format(fndef, e), file=sys.stderr) - return functions, errors - - -def get_mapsig_key(mapsig): - # PyTorch generates std::tuple<> without space among the tuple types, - # which would require special understanding in the string rewriter. - # Since we are using this as simple key, we can just string the spaces. - return mapsig.replace(' ', '') - - -def parse_local_overrides(path): - functions = [] - fndef = None - for line in open(path, 'r'): - line = line.strip() - if not fndef: - m = re.match(r'static\s+(.*);', line) - if m: - functions.append(m.group(1)) - continue - m = re.match(r'static\s+(.*)', line) - if m: - fndef = m.group(1) - else: - fndef = '{} {}'.format(fndef, line) - if fndef.endswith(';'): - functions.append(fndef[:-1]) - fndef = None - assert fndef is None - - overrides = {} - for fndef in functions: - # Discard static XLA type functions which are not ATEN. 
- is_tensor, fndef = is_tensor_api(fndef) - if is_tensor: - xtree = _XPARSER.parse(fndef) - mapsig_key = get_mapsig_key(create_map_sig(xtree, fndef)) - overrides[mapsig_key] = fndef - return overrides - - -def get_dialect_name(func): - name = '' - upper = True - cs = list(func) - for c in cs: - if c == '_': - upper = True - elif upper: - name += str(c).upper() - upper = False - else: - name += c - if cs[-1] == "_": - name += "Under" - return name - - -def generate_td_functions(fgens, overrides): - code = '' - overridden = set() - - code += "#ifdef ATEN_OP_DEFS\n" - code += "#else\n" - code += "#define ATEN_OP_DEFS\n\n" - - for fgen in fgens: - mapsig_key = get_mapsig_key(fgen.mapsig) - if mapsig_key in overrides: - overridden.add(mapsig_key) - if fgen.func in _TD_BLACKLIST: - continue - - rtype = fgen.tree.children[0] - num_outputs = 1 - if type_core(rtype) == 'std::tuple': - num_outputs = len(tuple_type_list(rtype)) - #print(num_outputs, rtype) - - dialect_name = get_dialect_name(fgen.func) - #print ('"{}"'.format(dialect_name)) - code += 'def aten_{}Op: aten_Op<"{}"'.format(dialect_name, fgen.func) - code += ', [NoSideEffect' - if not fgen.func in _TD_NO_OPSTATS_LIST: - code += ', StatisticsOpInterface' - code += ']>,\n' - code += ' Results<(outs' - # foreach output - # rparams = get_rparameters(fgen.tree) - # for p in rparams: - # pname = param_name(p) - # ptype = param_type(p) - # cptype = type_core(ptype) - # print(pname) - code += ' AnyTensor' - for i in range(num_outputs - 1): - code += ', AnyTensor' - code += ')> {\n' - code += ' let arguments = (\n' - params = get_parameters(fgen.tree) - for p in params: - pname = param_name(p) - ptype = param_type(p) - cptype = type_core(ptype) - if (cptype == 'Tensor'): - td_type = "AnyTensor" - elif (cptype == 'Scalar' or cptype == 'int64_t' or cptype == 'double' or - cptype == 'bool'): - td_type = "AnyScalar" - elif (cptype == 'c10::optional' or cptype == 'std::array'): - continue - elif (cptype == 'IntArrayRef'): - td_type = "AnyType" - else: - print('unhandled type', cptype) - td_type = "AnyType" - if p == params[0]: - code += ' ins {}:${}'.format(td_type, pname) - else: - code += ',\n {}:${}'.format(td_type, pname) - code += '\n );\n' - code += ' let summary = "aten {} operator";\n'.format(fgen.func) - code += ' let description = [{\n' - code += ' {}Op\n'.format(dialect_name) - code += ' aten {} operator\n'.format(fgen.func) - code += ' }];\n' - if not fgen.func in _TD_NO_OPSTATS_LIST: - code += ' let extraClassDeclaration = [{\n' - code += ' std::map getStatistics();\n' - code += ' }];\n' - code += '}\n\n' - - code += "#endif\n" - return code, overridden - - -def generate_registrations(fgens, overrides): - code = 'void RegisterAtenTypeFunctions() {\n' - code += ' static auto dispatch = torch::RegisterOperators()\n' - overridden = set() - for fgen in fgens: - mapsig_key = get_mapsig_key(fgen.mapsig) - if mapsig_key in overrides: - override_fn = 'ATenMLIRType::{}'.format(fgen.func) - overridden.add(mapsig_key) - else: - override_fn = fgen.xfunc if fgen.code else None - if override_fn: - code += ( - ' .op(torch::RegisterOperators::options().schema("{}")\n ' - '.impl_unboxedOnlyKernel<{}, &{}>(at::TensorTypeId::XLATensorId)\n' - ' .aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))\n'.format( - fgen.aten_sig, fgen.funsig, override_fn, override_fn, - fgen.aten_sig)) - return code + ';\n}\n', overridden - - -def generate_functions(fgens): - code = '' - for fgen in fgens: - if fgen.code: - code += '{}\n\n'.format(fgen.code) - return code - - 
-def generate_class_functions(fgens): - code = '' - for fgen in fgens: - if fgen.code: - code += ' static {};\n'.format(fgen.rwsig) - return code - - -def gen_output_file(args, name): - if not args.output_folder: - return sys.stdout - return open(os.path.join(args.output_folder, name), 'w') - - -def gen_h_output_file(args): - return gen_output_file(args, 'aten_mlir_type_default.h') - - -def gen_cpp_output_file(args): - return gen_output_file(args, 'aten_mlir_type_default.cpp') - - -def gen_td_output_file(args): - return gen_output_file(args, 'ATenOps.td') - - -def check_overrides(availagle_fgens, overrides, overridden): - misses = 0 - for mapsig, cpp_sig in overrides.items(): - mapsig_key = get_mapsig_key(mapsig) - if not mapsig_key in overridden: - misses += 1 - print('ERROR: ATenMLIRType function missed override:\n' - ' CPPSIG: {}\n' - ' MAPSIG: {}\n' - ' KEY : {}\n'.format(cpp_sig, mapsig, mapsig_key), - file=sys.stderr) - if misses != 0: - print('Some required overrides were missing (see above).') - print('Available overrides:') - for fgen in availagle_fgens: - print(' ', get_mapsig_key(fgen.mapsig)) - - return misses == 0 - - -def generate(args): - fndefs, errors = extract_functions(args.typedef) - print('Extracted {} functions ({} errors) from {}'.format( - len(fndefs), len(errors), args.typedef), - file=sys.stderr) - assert len(errors) == 0 - - local_overrides = parse_local_overrides(args.overridetype) - print('{} function overrides in {}'.format(len(local_overrides), - args.overridetype), - file=sys.stderr) - - fgens = [] - ctx = Context(args.functions) - for ts in fndefs: - try: - fgen = get_mlir_wrapper(ts, ctx) - if fgen: - fgens.append(fgen) - except Exception as e: - print('Failed to generate wrapper for {}: {}'.format(ts, e), - file=sys.stderr) - print('Generated {} wrappers for {}'.format(len(fgens), args.typedef), - file=sys.stderr) - - functions = generate_functions(fgens) - hfunctions = generate_class_functions(fgens) - - tdfunctions, overridden = generate_td_functions(fgens, local_overrides) - assert check_overrides( - fgens, - local_overrides, - overridden), ('Missing overrides when generating td functions') - - #print(tdfunctions) - - regs, overridden = generate_registrations(fgens, local_overrides) - #print (len(overrides), len(overridden)) - assert check_overrides( - fgens, - local_overrides, - overridden), ('Missing local overrides when generating registrations') - - # Create output files ... 
- print(_H_HEADER.format(gen=os.path.basename(sys.argv[0]), hfuncs=hfunctions), - file=gen_h_output_file(args)) - print(_CPP_HEADER.format(gen=os.path.basename(sys.argv[0]), - funcs=functions, - regs=regs), - file=gen_cpp_output_file(args)) - - with gen_td_output_file(args) as f: - f.write(tdfunctions) - - -if __name__ == '__main__': - arg_parser = argparse.ArgumentParser() - arg_parser.add_argument('--output_folder', type=str) - arg_parser.add_argument('overridetype', - type=str, - metavar='OVERRIDE_TYPE_FILE', - help='The path to the overrides file') - arg_parser.add_argument('typedef', - type=str, - metavar='TYPE_DEFAULT_FILE', - help='The path to the TypeDefault.h file') - arg_parser.add_argument('functions', - type=str, - metavar='FUNCTIONS_FILE', - help='The path to the Functions.h file') - args, files = arg_parser.parse_known_args() - generate(args) diff --git a/frontends/pytorch/utils/pt_util.py b/frontends/pytorch/utils/pt_util.py deleted file mode 100644 index 65a37c041..000000000 --- a/frontends/pytorch/utils/pt_util.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 -""" -Utility for handling common tasks for exported `.pt` model files. - -Usage: - # Dump PyTorch data structures for .pt file. - # This does not involve any MLIR code. - $ pt_util.py --dump model.pt - - # Import the .pt file into MLIR. - $ pt_util.py --import model.pt -""" - -import torch -import torch_mlir - -import argparse - - -def main(): - parser = argparse.ArgumentParser( - description="Utility for .pt files") - parser.add_argument("pt_file", metavar="PT_FILE", type=str, - help="the .pt file to import") - parser.add_argument("--dump", action="store_true", - help="dump the pytorch module") - parser.add_argument("--import", action="store_true", - help="import the pytorch module") - parser.add_argument("--exported-name", action="append", - help=""" -Name to export, such as `my.submodule.forward`(default = export all). -Can pass repeatedly. -""") - args = parser.parse_args() - # TODO: Investigate why "cpu" is needed. - module = torch.jit.load(args.pt_file, map_location="cpu") - - if args.dump: - module._c.dump(code=True, attrs=False, params=False) - - # `import` is a Python keyword, so getattr is needed. - if getattr(args, "import", False): - class_annotator = torch_mlir.ClassAnnotator() - if args.exported_name is not None: - class_annotator.exportNone(module._c._type()) - for name in args.exported_name: - class_annotator.exportPath(module._c._type(), name.split(".")) - mb = torch_mlir.ModuleBuilder() - mb.import_module(module._c, class_annotator) - mb.module.operation.print(large_elements_limit=16) - - -if __name__ == "__main__": - main() diff --git a/include/npcomp-c/BasicpyTypes.h b/include/npcomp-c/BasicpyTypes.h deleted file mode 100644 index 037459b05..000000000 --- a/include/npcomp-c/BasicpyTypes.h +++ /dev/null @@ -1,92 +0,0 @@ -//===-- npcomp-c/BasicpyTypes.h - C API for basicpy types ---------*- C -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM -// Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_C_BASICPYTYPES_H -#define NPCOMP_C_BASICPYTYPES_H - -#include "mlir-c/IR.h" -#include "mlir-c/Support.h" - -#ifdef __cplusplus -extern "C" { -#endif - -//===----------------------------------------------------------------------===// -// !basicpy.BoolType -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is the Python "bool" type. -MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyBool(MlirType t); - -/// Gets the Python "bool" type. -MLIR_CAPI_EXPORTED MlirType npcompBasicpyBoolTypeGet(MlirContext context); - -//===----------------------------------------------------------------------===// -// !basicpy.BytesType -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is the Python "bytes" type. -MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyBytes(MlirType t); - -/// Gets the Python "bytes" type. -MLIR_CAPI_EXPORTED MlirType npcompBasicpyBytesTypeGet(MlirContext context); - -//===----------------------------------------------------------------------===// -// !basicpy.DictType -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is the Python "dict" type. -MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyDict(MlirType t); - -/// Gets the generic Python "dict" type. -MLIR_CAPI_EXPORTED MlirType npcompBasicpyDictTypeGet(MlirContext context); - -//===----------------------------------------------------------------------===// -// List type -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is the Python "list" type. -MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyList(MlirType t); - -/// Gets the generic Python "list" type. -MLIR_CAPI_EXPORTED MlirType npcompBasicpyListTypeGet(MlirContext context); - -//===----------------------------------------------------------------------===// -// !basicpy.NoneType type. -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is a `!basicpy.NoneType`. -MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyNone(MlirType t); - -/// Gets the `!basicpy.NoneType` type. -MLIR_CAPI_EXPORTED MlirType npcompBasicpyNoneTypeGet(MlirContext context); - -//===----------------------------------------------------------------------===// -// SlotObject type. -//===----------------------------------------------------------------------===// - -MLIR_CAPI_EXPORTED MlirType npcompBasicPySlotObjectTypeGet( - MlirContext context, MlirStringRef className, intptr_t slotTypeCount, - const MlirType *slotTypes); - -//===----------------------------------------------------------------------===// -// !basicpy.TupleType -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is a `!basicpy.TupleType`. -MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyTuple(MlirType t); - -/// Gets the generic Python "tuple" type. 
-MLIR_CAPI_EXPORTED MlirType npcompBasicpyTupleTypeGet(MlirContext context); - -#ifdef __cplusplus -} -#endif - -#endif // NPCOMP_C_BASICPYTYPES_H diff --git a/include/npcomp-c/NumpyTypes.h b/include/npcomp-c/NumpyTypes.h deleted file mode 100644 index 7355d5849..000000000 --- a/include/npcomp-c/NumpyTypes.h +++ /dev/null @@ -1,59 +0,0 @@ -//===-- npcomp-c/NumpyTypes.h - C API for numpy types -------------*- C -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM -// Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_C_NUMPYTYPES_H -#define NPCOMP_C_NUMPYTYPES_H - -#include "mlir-c/IR.h" -#include "mlir-c/Support.h" - -#ifdef __cplusplus -extern "C" { -#endif - -//===----------------------------------------------------------------------===// -// !numpy.any_dtype type. -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is the special "any dtype" type that is used -// to signal an NDArray or tensor of unknown type. -MLIR_CAPI_EXPORTED bool npcompTypeIsANumpyAnyDtype(MlirType t); - -/// Gets the "any dtype" type. -MLIR_CAPI_EXPORTED MlirType npcompAnyDtypeTypeGet(MlirContext context); - -//===----------------------------------------------------------------------===// -// NDArray type. -//===----------------------------------------------------------------------===// - -/// Checks whether the given type is an NdArray type. -MLIR_CAPI_EXPORTED bool npcompTypeIsANumpyNdArray(MlirType t); - -/// Gets a numpy.NdArray type that is unranked. -MLIR_CAPI_EXPORTED MlirType -npcompNumpyNdArrayTypeGetUnranked(MlirType elementType); - -/// Gets a numpy.NdArray type that is ranked. Any dimensions that are -1 are -/// unknown. -MLIR_CAPI_EXPORTED MlirType npcompNumpyNdArrayTypeGetRanked( - intptr_t rank, const int64_t *shape, MlirType elementType); - -/// Helper that gets an equivalent NdArrayType from a ShapedType. -MLIR_CAPI_EXPORTED MlirType -npcompNumpyNdArrayTypeGetFromShaped(MlirType shapedType); - -/// Helper that converts an NdArrayType to a TensorType. -MLIR_CAPI_EXPORTED MlirType -npcompNumpyNdArrayTypeToTensor(MlirType ndarrayType); - -#ifdef __cplusplus -} -#endif - -#endif // NPCOMP_C_NUMPYTYPES_H diff --git a/include/npcomp/CMakeLists.txt b/include/npcomp/CMakeLists.txt index ef380b37f..d3c615448 100644 --- a/include/npcomp/CMakeLists.txt +++ b/include/npcomp/CMakeLists.txt @@ -2,4 +2,3 @@ add_subdirectory(Backend) add_subdirectory(Conversion) add_subdirectory(Dialect) add_subdirectory(RefBackend) -add_subdirectory(Typing) diff --git a/include/npcomp/Conversion/BasicpyToStd/Passes.h b/include/npcomp/Conversion/BasicpyToStd/Passes.h deleted file mode 100644 index 76bdf1201..000000000 --- a/include/npcomp/Conversion/BasicpyToStd/Passes.h +++ /dev/null @@ -1,21 +0,0 @@ -//===------------------------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LaLVM -// Exceptions. See https://llvm.org/LICENSE.txt for license information. 
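For reference, the Basicpy/Numpy C API declared above (and removed by this change) is a thin wrapper over the dialect types. The sketch below is illustrative only, not taken from the npcomp sources; it assumes an `MlirContext` into which the npcomp dialects have already been registered and loaded, and `buildSomeTypes` is a hypothetical helper name.

```c++
// Illustrative sketch only; assumes the npcomp dialects are loaded into `ctx`.
#include "mlir-c/BuiltinTypes.h"
#include "npcomp-c/BasicpyTypes.h"
#include "npcomp-c/NumpyTypes.h"

static void buildSomeTypes(MlirContext ctx) {
  // !basicpy.BoolType and !basicpy.NoneType via the C API.
  MlirType boolTy = npcompBasicpyBoolTypeGet(ctx);
  MlirType noneTy = npcompBasicpyNoneTypeGet(ctx);
  bool isBool = npcompTypeIsABasicpyBool(boolTy); // true

  // A ranked ndarray type; per the header above, -1 marks an unknown dimension.
  int64_t shape[2] = {2, -1};
  MlirType ndarray =
      npcompNumpyNdArrayTypeGetRanked(2, shape, mlirF32TypeGet(ctx));
  MlirType asTensor = npcompNumpyNdArrayTypeToTensor(ndarray);
  (void)noneTy; (void)isBool; (void)asTensor;
}
```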
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_CONVERSION_BASICPYTOSTD_PASSES_H -#define NPCOMP_CONVERSION_BASICPYTOSTD_PASSES_H - -#include "mlir/Pass/Pass.h" -#include <memory> - -namespace mlir { -namespace NPCOMP { -std::unique_ptr<OperationPass<FuncOp>> createConvertBasicpyToStdPass(); -} -} // namespace mlir - -#endif // NPCOMP_CONVERSION_BASICPYTOSTD_PASSES_H diff --git a/include/npcomp/Conversion/BasicpyToStd/Patterns.h b/include/npcomp/Conversion/BasicpyToStd/Patterns.h deleted file mode 100644 index ec9bcfc7d..000000000 --- a/include/npcomp/Conversion/BasicpyToStd/Patterns.h +++ /dev/null @@ -1,27 +0,0 @@ -//===------------------------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_CONVERSION_BASICPYTOSTD_PATTERNS_H -#define NPCOMP_CONVERSION_BASICPYTOSTD_PATTERNS_H - -#include "mlir/IR/PatternMatch.h" -#include - -namespace mlir { -namespace NPCOMP { - -//===----------------------------------------------------------------------===// -// Conversion patterns -//===----------------------------------------------------------------------===// - -void populateBasicpyToStdPrimitiveOpPatterns(RewritePatternSet &patterns); - -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_CONVERSION_BASICPYTOSTD_PATTERNS_H diff --git a/include/npcomp/Conversion/Passes.td b/include/npcomp/Conversion/Passes.td index f56a29171..8711c0ebd 100644 --- a/include/npcomp/Conversion/Passes.td +++ b/include/npcomp/Conversion/Passes.td @@ -112,13 +112,4 @@ def ConvertTorchToIREE : Pass<"convert-torch-to-iree", "FuncOp"> { let constructor = "mlir::NPCOMP::createConvertTorchToIREEPass()"; } -//===----------------------------------------------------------------------===// -// Basicpy conversions -//===----------------------------------------------------------------------===// - -def ConvertBasicpyToStd : Pass<"convert-basicpy-to-std", "FuncOp"> { - let summary = "Convert representable Basicpy ops to std"; - let constructor = "mlir::NPCOMP::createConvertBasicpyToStdPass()"; -} - #endif // NPCOMP_CONVERSION_PASSES diff --git a/include/npcomp/Dialect/Basicpy/CMakeLists.txt b/include/npcomp/Dialect/Basicpy/CMakeLists.txt deleted file mode 100644 index 9f57627c3..000000000 --- a/include/npcomp/Dialect/Basicpy/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(IR) -add_subdirectory(Transforms) diff --git a/include/npcomp/Dialect/Basicpy/IR/BasicpyDialect.h b/include/npcomp/Dialect/Basicpy/IR/BasicpyDialect.h deleted file mode 100644 index bf11fafed..000000000 --- a/include/npcomp/Dialect/Basicpy/IR/BasicpyDialect.h +++ /dev/null @@ -1,114 +0,0 @@ -//===- BasicPyDialect.h - Basic Python --------------------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_BASICPY_IR_BASICPY_DIALECT_H -#define NPCOMP_DIALECT_BASICPY_IR_BASICPY_DIALECT_H - -#include "mlir/IR/Attributes.h" -#include "mlir/IR/Dialect.h" -#include "mlir/IR/Types.h" -#include "npcomp/Typing/Analysis/CPA/Interfaces.h" - -namespace mlir { -namespace NPCOMP { -namespace Basicpy { - -namespace detail { -struct SlotObjectTypeStorage; -} // namespace detail - -/// Python 'bool' type (can contain values True or False, corresponding to -/// i1 constants of 0 or 1). -class BoolType : public Type::TypeBase { -public: - using Base::Base; - static BoolType get(MLIRContext *context) { return Base::get(context); } -}; - -/// The type of the Python `bytes` values. -class BytesType : public Type::TypeBase { -public: - using Base::Base; - static BytesType get(MLIRContext *context) { return Base::get(context); } -}; - -/// Python 'dict' type. -class DictType : public Type::TypeBase { -public: - using Base::Base; - static DictType get(MLIRContext *context) { return Base::get(context); } -}; - -/// The type of the Python `Ellipsis` value. -class EllipsisType : public Type::TypeBase { -public: - using Base::Base; - static EllipsisType get(MLIRContext *context) { return Base::get(context); } -}; - -/// Python 'list' type. -class ListType : public Type::TypeBase { -public: - using Base::Base; - static ListType get(MLIRContext *context) { return Base::get(context); } -}; - -/// The type of the Python `None` value. -class NoneType : public Type::TypeBase { -public: - using Base::Base; - static NoneType get(MLIRContext *context) { return Base::get(context); } -}; - -class SlotObjectType : public Type::TypeBase { -public: - using Base::Base; - static SlotObjectType get(StringAttr className, ArrayRef slotTypes); - StringAttr getClassName(); - unsigned getSlotCount(); - ArrayRef getSlotTypes(); - - // Shorthand to check whether the SlotObject is of a given className and - // arity. - bool isOfClassArity(StringRef className, unsigned arity) { - return getClassName().getValue() == className && getSlotCount() == arity; - } -}; - -/// The type of the Python `str` values. -class StrType : public Type::TypeBase { -public: - using Base::Base; - static StrType get(MLIRContext *context) { return Base::get(context); } -}; - -/// Python 'tuple' type. -class TupleType : public Type::TypeBase { -public: - using Base::Base; - static TupleType get(MLIRContext *context) { return Base::get(context); } -}; - -/// An unknown type that could be any supported python type. -class UnknownType : public Type::TypeBase { -public: - using Base::Base; - static UnknownType get(MLIRContext *context) { return Base::get(context); } - - Typing::CPA::TypeNode *mapToCPAType(Typing::CPA::Context &context); -}; - -} // namespace Basicpy -} // namespace NPCOMP -} // namespace mlir - -#include "npcomp/Dialect/Basicpy/IR/BasicpyOpsDialect.h.inc" - -#endif // NPCOMP_DIALECT_BASICPY_IR_BASICPY_DIALECT_H diff --git a/include/npcomp/Dialect/Basicpy/IR/BasicpyDialect.td b/include/npcomp/Dialect/Basicpy/IR/BasicpyDialect.td deleted file mode 100644 index bc3a0340b..000000000 --- a/include/npcomp/Dialect/Basicpy/IR/BasicpyDialect.td +++ /dev/null @@ -1,166 +0,0 @@ -//===- BasicPyDialect.td - Basic python dialect ------------*- tablegen -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
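On the C++ side, each of the type classes declared above exposes a static `get` on the context. The snippet below is an editorial illustration rather than npcomp code; it assumes the Basicpy dialect is loaded into the context, and `buildBasicpyTypes` is a hypothetical helper name.

```c++
// Illustrative sketch only; assumes the Basicpy dialect is loaded.
#include "mlir/IR/Builders.h"
#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h"

using namespace mlir;
using namespace mlir::NPCOMP;

static void buildBasicpyTypes(MLIRContext *context) {
  Type boolTy = Basicpy::BoolType::get(context);
  Type noneTy = Basicpy::NoneType::get(context);

  // SlotObjectType::get takes the class name and the slot types; a 3-slot
  // "slice" object mirrors the slice convention used elsewhere in the dialect.
  Builder b(context);
  Type slots[] = {noneTy, noneTy, noneTy};
  auto slotObj = Basicpy::SlotObjectType::get(b.getStringAttr("slice"), slots);
  (void)boolTy;
  (void)slotObj;
}
```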
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_BASICPY_IR_BASICPY_DIALECT -#define NPCOMP_DIALECT_BASICPY_IR_BASICPY_DIALECT - -include "mlir/IR/OpBase.td" - -//===----------------------------------------------------------------------===// -// Dialect definition -//===----------------------------------------------------------------------===// - -def Basicpy_Dialect : Dialect { - let name = "basicpy"; - let summary = "Basic Python dialect"; - let description = [{ - Core types and ops - }]; - let cppNamespace = "::mlir::NPCOMP::Basicpy"; - let hasConstantMaterializer = 1; -} - -//===----------------------------------------------------------------------===// -// Op templates -//===----------------------------------------------------------------------===// - -class Basicpy_Op traits = []> : - Op { - let parser = [{ return ::parse$cppClass(parser, &result); }]; - let printer = [{ return ::print(p, *this); }]; - let verifier = [{ return ::verify(*this); }]; -} - -//===----------------------------------------------------------------------===// -// Dialect types -//===----------------------------------------------------------------------===// - -def Basicpy_BoolType : DialectType()">, "Bool type">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Basicpy::BoolType>()"> { - let description = [{ - Type for 'True' and 'False' values. - }]; -} - -def Basicpy_BytesType : DialectType()">, "Bytes type">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Basicpy::BytesType>()"> { - let description = [{ - Represents Python 'bytes' values. - }]; -} - -def Basicpy_EllipsisType : DialectType()">, "Ellipsis type">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Basicpy::EllipsisType>()"> { - let description = [{ - Type of the Python 'Ellipsis' value. - }]; -} - -def Basicpy_NoneType : DialectType()">, "None type">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Basicpy::NoneType>()"> { - let description = [{ - Type of the Python 'None' value. - }]; -} - -def Basicpy_SlotObjectType : DialectType()">, - "Slot object"> { - let description = [{ - Type for built-in objects which have a fixed number of slots and a type - name in the system catalog of types. In some ways, this resembles a - namedtuple, but it is used for built-in custom objects. - }]; -} - -def Basicpy_StrType : DialectType()">,"String type">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Basicpy::StrType>()"> { - let description = [{ - Represents values of the python 'str' type. - }]; -} - -def Basicpy_UnknownType : DialectType()">, - "Unknown type"> { - let description = [{ - An unknown type (for the current phase of processing). - }]; -} - -def Basicpy_ListType : DialectType()">, - "List type"> { - let description = [{ - A Python list type. In the non-parameterized case, there are limited - constraints on the element type or length; however, it can be refined to - include such constraints. - - As in Python, this list type represents a mutable, reference counted - object in a corresponding runtime layer. - }]; -} - -def Basicpy_TupleType : DialectType()">, - "Tuple type"> { - let description = [{ - A Python tuple type. In the non-parameterized case, there are limited - constraints on the element type or length; however, it can be refined to - include such constraints. - - As in Python, post-construction tuple's are immutable, reference counted - objects in a corresponding runtime layer. 
However, since they are - immutable, they can also serve as value-typed entities if their elements - are immutable. - }]; -} - -def Basicpy_DictType : DialectType()">, - "Dict type"> { - let description = [{ - A Python dict type. In the non-parameterized case, there are limited - constraints on the key or value types; however, it can be refined to - include such constraints. - - As in Python, this list type represents a mutable, reference counted - object in a corresponding runtime layer. - }]; -} - -//===----------------------------------------------------------------------===// -// Type/attribute predicates -//===----------------------------------------------------------------------===// - -def Basicpy_SingletonType : AnyTypeOf<[ - Basicpy_NoneType, - Basicpy_EllipsisType -]>; - -// A predicate to determine whether a Type is a SlotObject of a given -// className and arity. Does no checking of slot types. -class Basicpy_SlotObjectOfClassArity : - And<[ - Basicpy_SlotObjectType.predicate, - CPred< - "$_self.cast<::mlir::NPCOMP::Basicpy::SlotObjectType>().isOfClassArity(\"" - # className # "\", " # arity # ")"> - ]>; - -// Type representing a 'slice' object, which mirrors the Python built-in -// slice class. -def Basicpy_SliceSlotObjectType : - Type>; - -#endif // NPCOMP_DIALECT_BASICPY_IR_BASICPY_DIALECT diff --git a/include/npcomp/Dialect/Basicpy/IR/BasicpyOps.h b/include/npcomp/Dialect/Basicpy/IR/BasicpyOps.h deleted file mode 100644 index 4bfaf90cd..000000000 --- a/include/npcomp/Dialect/Basicpy/IR/BasicpyOps.h +++ /dev/null @@ -1,29 +0,0 @@ -//===- BasicPyOps.h - Basic python ops --------------------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_BASICPY_IR_BASICPY_OPS_H -#define NPCOMP_DIALECT_BASICPY_IR_BASICPY_OPS_H - -#include "mlir/IR/Attributes.h" -#include "mlir/IR/Builders.h" -#include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/Dialect.h" -#include "mlir/IR/FunctionSupport.h" -#include "mlir/IR/OpDefinition.h" -#include "mlir/IR/OpImplementation.h" -#include "mlir/IR/SymbolTable.h" -#include "mlir/Interfaces/CallInterfaces.h" -#include "mlir/Interfaces/ControlFlowInterfaces.h" -#include "mlir/Interfaces/SideEffectInterfaces.h" - -#include "npcomp/Dialect/Basicpy/IR/BasicpyOpsEnums.h.inc" - -#define GET_OP_CLASSES -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h.inc" - -#endif // NPCOMP_DIALECT_BASICPY_IR_BASICPY_OPS_H diff --git a/include/npcomp/Dialect/Basicpy/IR/BasicpyOps.td b/include/npcomp/Dialect/Basicpy/IR/BasicpyOps.td deleted file mode 100644 index 67ea2bccd..000000000 --- a/include/npcomp/Dialect/Basicpy/IR/BasicpyOps.td +++ /dev/null @@ -1,545 +0,0 @@ -//===- BasicpyOps.td - Basic Python ops --------------------*- tablegen -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_BASICPY_IR_BASICPY_OPS -#define NPCOMP_DIALECT_BASICPY_IR_BASICPY_OPS - -include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.td" -include "mlir/Interfaces/CallInterfaces.td" -include "mlir/Interfaces/ControlFlowInterfaces.td" -include "mlir/Interfaces/SideEffectInterfaces.td" -include "mlir/IR/OpAsmInterface.td" -include "mlir/IR/SymbolInterfaces.td" - -//===----------------------------------------------------------------------===// -// Predicates -//===----------------------------------------------------------------------===// - -def BoolOrI1Type : AnyTypeOf<[Basicpy_BoolType, I1], "Python bool or i1">; - -//===----------------------------------------------------------------------===// -// Binary operation enum -// The name matches the operation name in the python AST ("Add", "Mult", etc). -//===----------------------------------------------------------------------===// - -def BINOP_ADD : StrEnumAttrCase<"Add">; -def BINOP_BITAND : StrEnumAttrCase<"BitAnd">; -def BINOP_BITOR : StrEnumAttrCase<"BitOr">; -def BINOP_BITXOR : StrEnumAttrCase<"BitXor">; -def BINOP_DIV : StrEnumAttrCase<"Div">; -def BINOP_FLOORDIV : StrEnumAttrCase<"FloorDiv">; -def BINOP_LSHIFT : StrEnumAttrCase<"LShift">; -def BINOP_MATMULT : StrEnumAttrCase<"MatMult">; -def BINOP_MOD : StrEnumAttrCase<"Mod">; -def BINOP_MULT : StrEnumAttrCase<"Mult">; -def BINOP_RSHIFT : StrEnumAttrCase<"RShift">; -def BINOP_SUB : StrEnumAttrCase<"Sub">; - -def BinaryOperationAttr : StrEnumAttr< - "BinaryOperation", "Operation for a binary expression", [ - BINOP_ADD, - BINOP_BITAND, - BINOP_BITOR, - BINOP_BITXOR, - BINOP_DIV, - BINOP_FLOORDIV, - BINOP_LSHIFT, - BINOP_MATMULT, - BINOP_MOD, - BINOP_MULT, - BINOP_RSHIFT, - BINOP_SUB, - ]> { - let cppNamespace = "::mlir::NPCOMP::Basicpy"; -} - -//===----------------------------------------------------------------------===// -// Comparison operation enum -// The name matches the operation name in the python AST ("Lt", "Gt", etc). 
-//===----------------------------------------------------------------------===// - -def CMPOP_EQ : StrEnumAttrCase<"Eq">; -def CMPOP_GT : StrEnumAttrCase<"Gt">; -def CMPOP_GTE : StrEnumAttrCase<"GtE">; -def CMPOP_IN : StrEnumAttrCase<"In">; -def CMPOP_IS : StrEnumAttrCase<"Is">; -def CMPOP_ISNOT : StrEnumAttrCase<"IsNot">; -def CMPOP_LT : StrEnumAttrCase<"Lt">; -def CMPOP_LTE : StrEnumAttrCase<"LtE">; -def CMPOP_NEQ : StrEnumAttrCase<"NotEq">; -def CMPOP_NOTIN : StrEnumAttrCase<"NotIn">; - -def CompareOperationAttr : StrEnumAttr< - "CompareOperation", "Comparison operator", [ - CMPOP_EQ, - CMPOP_GT, - CMPOP_GTE, - CMPOP_IN, - CMPOP_IS, - CMPOP_ISNOT, - CMPOP_LT, - CMPOP_LTE, - CMPOP_NEQ, - CMPOP_NOTIN, - ]> { - let cppNamespace = "::mlir::NPCOMP::Basicpy"; -} - -//===----------------------------------------------------------------------===// -// Constant and constructor operations -//===----------------------------------------------------------------------===// - -def Basicpy_NumericConstantOp : Basicpy_Op<"numeric_constant", [ - ConstantLike, NoSideEffect, - DeclareOpInterfaceMethods]> { - let summary = "A constant from the Python3 numeric type hierarchy"; - let description = [{ - Basicpy re-uses core MLIR types to represent the Python3 numeric type - hierarchy with the following mappings: - - * Python3 `int` : In python, this type is signed, arbitrary precision but - in typical realizations, it maps to an MLIR `IntegerType` of a fixed - bit-width (typically si64 if no further information is known). In the - future, there may be a real `Basicpy::IntType` that retains the true - arbitrary precision nature, but this is deemed an enhancement that - does not obviate the need to infer physical, sized types for many - real-world cases. As such, the Basicpy numeric type hierarchy will - always include physical `IntegerType`, if only to enable progressive - lowering and interop with cases where the precise type is known. - * Python3 `float` : This is allowed to map to any legal floating point - type on the physical machine and is usually represented as a double (f64). - In MLIR, any `FloatType` is allowed, which facilitates progressive - lowering and interop with cases where a more precise type is known. - * Python3 `complex` : Maps to an MLIR `ComplexType` with a `FloatType` - elementType (note: in Python, complex numbers are always defined with - floating point components). - * `bool` : See `bool_constant` for a constant (i1) -> !basicpy.BoolType - constant. This constant op is not used for representing such bool - values, even though from the Python perspective, bool is part of the - numeric hierarchy (the distinction is really only necessary during - promotion). - - ### Integer Signedness - - All `int` values in Python are signed. However, there exist special cases - where libraries (i.e. struct packing and numpy arrays) interoperate with - unsigned values. As such, when mapping to MLIR, Python integer types - are represented as either signed or unsigned `IntegerType` types and can - be lowered to signless integers as appropriate (typically during realization - of arithmetic expressions where the choice is meaningful). Since it is not - known at the outset when in lowering this information is safe to discard - this `numeric_constant` op accepts any signedness. 
- }]; - - let arguments = (ins AnyAttr:$value); - let results = (outs AnyType); - let hasFolder = 1; -} - -def Basicpy_BoolConstantOp : Basicpy_Op<"bool_constant", [ - ConstantLike, NoSideEffect, - DeclareOpInterfaceMethods]> { - let summary = "A boolean constant"; - let description = [{ - A constant of type !basicpy.BoolType that can take either an i1 value - of 0 (False) or 1 (True). - - Note that as in Python a BoolType can be thought of as an object, whereas - the corresponding i1 is a numeric type suitable for use in contexts where - storage format matters (or for interop with lower level dialects). - }]; - let arguments = (ins I1Attr:$value); - let results = (outs - Basicpy_BoolType:$result - ); - let assemblyFormat = "$value attr-dict"; - let hasFolder = 1; -} - -// TODO: Implement ConstantLike op trait. -def Basicpy_BuildDictOp : Basicpy_Op<"build_dict", [NoSideEffect]> { - let summary = "Builds an empty dict"; - let description = [{ - This op mirrors the CPython BUILD_MAP op (note naming difference). - - Note that as with CPython, this op only builds an empty dict; however, - it is reserved in the future for it to take variadic operands to construct - with a list of key/value pairs. - }]; - let arguments = (ins - ); - let results = (outs - Basicpy_DictType:$result - ); - let assemblyFormat = "attr-dict `:` functional-type(operands, results)"; -} - -// TODO: Implement ConstantLike op trait. -def Basicpy_BuildListOp : Basicpy_Op<"build_list", [NoSideEffect]> { - let summary = "Builds a list from operands"; - let description = [{ - Constructs a new list object from its operands. - - TODO: Any allowable type can be expressed in lists; however, this should be - revisited once more of the dialect infrastructure is in place and tightened - up accordingly. At that time, appropriate constraints should be added that - both allow correct program representation and support transformations to - lower levels (i.e. allowing a wider set of types as useful for conversions). - }]; - let arguments = (ins - Variadic:$elements - ); - let results = (outs - Basicpy_ListType:$result - ); - let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; -} - -// TODO: Implement ConstantLike op trait. -def Basicpy_BuildTupleOp : Basicpy_Op<"build_tuple", [NoSideEffect]> { - let summary = "Builds a tuple from operands"; - let description = [{ - Constructs a new tuple object from its operands. - - TODO: Any allowable type can be expressed in lists; however, this should be - revisited once more of the dialect infrastructure is in place and tightened - up accordingly. At that time, appropriate constraints should be added that - both allow correct program representation and support transformations to - lower levels (i.e. allowing a wider set of types as useful for conversions). - }]; - let arguments = (ins - Variadic:$elements - ); - let results = (outs - Basicpy_TupleType:$result - ); - let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; -} - -def Basicpy_BytesConstantOp : Basicpy_Op<"bytes_constant", [ - ConstantLike, NoSideEffect, - DeclareOpInterfaceMethods]> { - let summary = "Constant bytes value"; - let description = [{ - A bytes value of BytesType. The value is represented by a StringAttr. 
- }]; - let arguments = (ins - StrAttr:$value - ); - let results = (outs - Basicpy_BytesType:$result - ); - let assemblyFormat = "$value attr-dict"; - let hasFolder = 1; -} - -def Basicpy_SingletonOp : Basicpy_Op<"singleton", [ - ConstantLike, NoSideEffect]> { - let summary = "Constant value for a singleton type"; - let description = [{ - Some types only have a single possible value, represented by the - SingletonAttr. This op allows creating constants of these types. - }]; - let arguments = (ins); - let results = (outs - Basicpy_SingletonType:$result - ); - let assemblyFormat = "attr-dict `:` type($result)"; - let hasFolder = 1; -} - -def Basicpy_StrConstantOp : Basicpy_Op<"str_constant", [ - ConstantLike, NoSideEffect, - DeclareOpInterfaceMethods]> { - let summary = "Constant string value"; - let description = [{ - A string value of StrType. The value is represented by a StringAttr - that is UTF-8 encoded. - }]; - let arguments = (ins - StrAttr:$value - ); - let results = (outs - Basicpy_StrType:$result - ); - let assemblyFormat = "$value attr-dict"; - let hasFolder = 1; -} - -//===----------------------------------------------------------------------===// -// Casting and coercion operations -//===----------------------------------------------------------------------===// - -def Basicpy_AsI1Op : Basicpy_Op<"as_i1", - [NoSideEffect]> { - let summary = "Evaluates an input to an i1 predicate value"; - let description = [{ - Applies the rules for interpreting a type as a boolean, returning an i1 - indicating the truthiness of the operand. Since the output of this op - is intended to drive lower-level control flow, the i1 type is used (not - the user level BoolType). - }]; - let arguments = (ins AnyType:$operand); - let results = (outs I1:$result); - let assemblyFormat = "$operand attr-dict `:` type($operand)"; -} - -def Basicpy_BoolCastOp : Basicpy_Op<"bool_cast", [NoSideEffect]> { - let summary = "Casts between BoolType and i1 (predicate value)"; - let description = [{ - When interfacing with lower level dialect or progressively lowering - the Python BoolType away, it is often necessary to cast between it and - i1, which is used to represent bool-ness at lower levels. - }]; - let arguments = (ins BoolOrI1Type:$operand); - let results = (outs BoolOrI1Type:$result); - let assemblyFormat = "$operand attr-dict `:` type(operands) `->` type(results)"; - let hasFolder = 1; -} - -def Basicpy_UnknownCastOp : Basicpy_Op<"unknown_cast", [NoSideEffect]> { - let summary = "Casts to and from the UnknownType"; - let arguments = (ins AnyType:$operand); - let results = (outs AnyType:$result); - let assemblyFormat = "operands attr-dict `:` type(operands) `->` type(results)"; - - let hasCanonicalizer = 1; -} - -//===----------------------------------------------------------------------===// -// Operations -//===----------------------------------------------------------------------===// - -def Basicpy_BinaryCompareOp : Basicpy_Op<"binary_compare", []> { - let summary = "Performs a comparison between two operands"; - let description = [{ - This op performs only one step of a potentially multi-step short - circuit comparison. 
- See: https://docs.python.org/3/reference/expressions.html#comparisons - }]; - let arguments = (ins - AnyType:$left, - AnyType:$right, - CompareOperationAttr:$operation - ); - let results = (outs - Basicpy_BoolType:$result - ); - let assemblyFormat = "$left $operation $right attr-dict `:` type(operands)"; -} - -def Basicpy_BinaryExprOp : Basicpy_Op<"binary_expr", []> { - let summary = "Binary expression"; - let description = [{ - An expression between two operands as generated by the AST BinOp node. - }]; - let arguments = (ins - AnyType:$left, - AnyType:$right, - BinaryOperationAttr:$operation - ); - let results = (outs - AnyType:$result - ); - let assemblyFormat = "$left $operation $right attr-dict `:` functional-type(operands, results)"; -} - -def Basicpy_ExecOp : Basicpy_Op<"exec", [ - SingleBlockImplicitTerminator<"ExecDiscardOp">]> { - let summary = "Evaluates an expression being executed as a statement"; - let description = [{ - The result is discarded. Typically expressions are no-side-effect and can - be re-ordered as needed. Embedding one in an exec op ensures that its - placement in program order is preserved. - }]; - let regions = (region SizedRegion<1>:$body); - - let skipDefaultBuilders = 1; - let builders = [ - OpBuilder<(ins)>, - ]; - let extraClassDeclaration = [{ - OpBuilder getBodyBuilder() { - Block* body = getBody(0); - return OpBuilder::atBlockEnd(body); - } - }]; -} - -def Basicpy_ExecDiscardOp : Basicpy_Op<"exec_discard", [ - NoSideEffect, ReturnLike, Terminator]> { - let summary = "Terminator for an exec block"; - let description = [{ - Discards results and terminates an exec block. - }]; - let arguments = (ins Variadic:$operands); - let assemblyFormat = "operands attr-dict `:` type(operands)"; -} - -def Basicpy_FuncTemplateCallOp : Basicpy_Op<"func_template_call", []> { - let summary = "Calls a function template"; - let description = [{ - Most function calls start with this generic calling op, which binds - symbolically to a func_template. At this level, there are very few - semantics associated with the call, since, often, both types and the - specific concrete callee cannot be determined. - - Per python calling conventions, all functions return one result, even if - None or a tuple (which may be syntactically unpacked to multiple results). - - If specified, the `argNames` operand is right aligned to the list of - positional `args`, representing arguments that are special or have been - passed with a keyword. The following arg names are special: - '*': Indicates that the argument is a positional argument pack (must be - the first arg name, if present). - '**': Indicates that the argument is a keyword argument pack (must be - the last arg name, if present). - }]; - - let arguments = (ins - FlatSymbolRefAttr:$callee, - Variadic:$args, - StrArrayAttr:$arg_names); - let results = (outs AnyType:$result); - let assemblyFormat = "$callee `(` $args `)` `kw` $arg_names attr-dict `:` functional-type($args, results)"; - let skipDefaultBuilders = 1; - let builders = [ - OpBuilder<(ins)>, - ]; -} - -def Basicpy_FuncTemplateOp : Basicpy_Op<"func_template", [ - IsolatedFromAbove, - SingleBlockImplicitTerminator<"FuncTemplateTerminatorOp">, - NativeOpTrait<"SymbolTable">, - Symbol]> { - let summary = "Group of multiple overload-resolved concrete functions"; - let description = [{ - The outer func_template op acts as a module that can contain named concrete - functions that are interpreted as overloads. If the function signature is - sufficient to disambiguate (i.e. 
with nothing more than arity and MLIR - argument types), then this is all that is needed. However, in many cases, - additional attributes will need to be specified to further constrain types. - The first matching function signature is selected to satisfy a - `func_template_call` op. - - TODO: Define this extended constraint matching. - - Instantiation - ------------- - Once a concrete function is selected as being applicable to a given call, - it will typically be instantiated as a standalone, unspecialized function - in the calling module (as a peer to the func_template). This function - will be uniquely identified by concating the outer func_template's symbol - name, '$', and the concrete instance's symbol name. - - Note that the function may still be unspecialized (in that it contains - UnknownType arguments/results), and type inference is expected to further - specialize/inline/constrain it. - - Naming - ------ - By convention, func_templates are named to avoid collision for various - uses: - - Global function templates: "__global$python.qualified.name" - - Method names: "__method$method_name" - - Attribute getter: "__getattr$attr_name" - - Attribute setter: "__setattr$attr_name" - - As in user-level python, for functions that bind to an instance, the first - argument must be a concrete type for the bound instance type. In this way, - there is one `func_template` for every unique member name and the normal - type constraints system is used to select the overload, just as if it was - a normal function call. It is left to utility routines to merge libraries - in a way that preserves this invariant. - - TODO: This needs to be fleshed out more as some additional rules about - ordering and conflict resolution are likely needed to make this correct. - - Correlation with python runtime - ------------------------------- - When extracting a program, it is typically necessary to create weak - references to specific python functions and correlate them back to a named - template defined here. Often times this can just be done lexically, but - to avoid fragility, any func_template that correlates to a python - runtime function will have an additional attribute `py_bind` that is an - array of StringAttr qualified names to resolve and bind to in the python - runtime. In cases of divergence, the symbol name of the template should - be chosen just for uniqueness (not significance). - - The qualified name format for `py_bind` attribute is: - package.name#local.qualified.name - }]; - let arguments = (ins); - let regions = (region SizedRegion<1>:$body); - - let skipDefaultBuilders = 1; - let builders = [ - OpBuilder<(ins)>, - ]; - let extraClassDeclaration = [{ - OpBuilder getBodyBuilder() { - Block* body = getBody(0); - return OpBuilder::atBlockEnd(body); - } - }]; -} - -def Basicpy_FuncTemplateTerminatorOp : Basicpy_Op<"func_template_terminator", [ - HasParent<"Basicpy::FuncTemplateOp">, - Terminator]> { - let summary = "Terminator pseudo-op for the FuncTemplateOp"; - - let parser = ?; - let printer = ?; -} - -def Basicpy_SlotObjectMakeOp : Basicpy_Op<"slot_object_make", [ - NoSideEffect]> { - let summary = "Creates an instance of a SlotObject type"; - let description = [{ - SlotObjects are typically instances of built-in classes that have a fixed - number of slots. Unlike in standard python, the types of each slot are - tracked. - - This op has a custom assembly form which can be used when valid that - omits the operand types (since they are equal to the types in the returned - slot object). 
Example: - %0 = basicpy.singleton : !basicpy.NoneType - %1 = basicpy.slot_object_make(%0) -> - !basicpy.SlotObject<slice, !basicpy.NoneType> - }]; - let arguments = (ins - // TODO: Tighter constraints on allowable types. - Variadic:$slots - ); - let results = (outs - Basicpy_SlotObjectType:$result - ); -} - -def Basicpy_SlotObjectGetOp : Basicpy_Op<"slot_object_get", [ - NoSideEffect]> { - let summary = "Gets a slot from a slot object"; - let description = [{ - Gets a slot from a SlotObject. - - Example: - %0 = basicpy.slot_object_make ... - %1 = basicpy.slot_object_get %0[1] : !basicpy.SlotObject<...> - }]; - let arguments = (ins - Basicpy_SlotObjectType:$object, - IndexAttr:$index - ); - let results = (outs - AnyType:$result - ); -} - -#endif // NPCOMP_DIALECT_BASICPY_IR_BASICPY_OPS diff --git a/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt b/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt deleted file mode 100644 index 08e54a253..000000000 --- a/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt +++ /dev/null @@ -1,15 +0,0 @@ -set(dialect BasicpyOps) -set(dialect_namespace basicpy) -set(LLVM_TARGET_DEFINITIONS ${dialect}.td) - -mlir_tablegen(${dialect}.h.inc -gen-op-decls) -mlir_tablegen(${dialect}.cpp.inc -gen-op-defs) -mlir_tablegen(${dialect}Dialect.h.inc -gen-dialect-decls -dialect=${dialect_namespace}) -mlir_tablegen(${dialect}Dialect.cpp.inc -gen-dialect-defs -dialect=${dialect_namespace}) -mlir_tablegen(${dialect}Enums.h.inc -gen-enum-decls) -mlir_tablegen(${dialect}Enums.cpp.inc -gen-enum-defs) -add_public_tablegen_target(MLIR${dialect}IncGen) -add_dependencies(mlir-headers MLIR${dialect}IncGen) - -add_mlir_doc(BasicpyDialect BasicpyDialect Basicpy/ -gen-dialect-doc) -add_mlir_doc(BasicpyOps BasicpyOps Basicpy/ -gen-op-doc) diff --git a/include/npcomp/Dialect/Basicpy/Transforms/CMakeLists.txt b/include/npcomp/Dialect/Basicpy/Transforms/CMakeLists.txt deleted file mode 100644 index 2630b6d4c..000000000 --- a/include/npcomp/Dialect/Basicpy/Transforms/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -set(LLVM_TARGET_DEFINITIONS Passes.td) -mlir_tablegen(Passes.h.inc -gen-pass-decls) -add_public_tablegen_target(NPCOMPBasicpyPassIncGen) - -add_mlir_doc(Passes Transforms ./ -gen-pass-doc) diff --git a/include/npcomp/Dialect/Basicpy/Transforms/Passes.h b/include/npcomp/Dialect/Basicpy/Transforms/Passes.h deleted file mode 100644 index 923034719..000000000 --- a/include/npcomp/Dialect/Basicpy/Transforms/Passes.h +++ /dev/null @@ -1,30 +0,0 @@ -//===------------------------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_BASICPY_TRANSFORMS_PASSES_H -#define NPCOMP_DIALECT_BASICPY_TRANSFORMS_PASSES_H - -#include "mlir/Pass/Pass.h" - -#include <memory> - -namespace mlir { -namespace NPCOMP { -namespace Basicpy { - -std::unique_ptr<OperationPass<FuncOp>> createFunctionTypeInferencePass(); - -} // namespace Basicpy - -/// Registers all Basicpy transformation passes.
-void registerBasicpyPasses(); - -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_DIALECT_BASICPY_TRANSFORMS_PASSES_H diff --git a/include/npcomp/Dialect/Basicpy/Transforms/Passes.td b/include/npcomp/Dialect/Basicpy/Transforms/Passes.td deleted file mode 100644 index dbfbc05fb..000000000 --- a/include/npcomp/Dialect/Basicpy/Transforms/Passes.td +++ /dev/null @@ -1,23 +0,0 @@ -//===-- Passes.td - Pass definition file -------------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_BASICPY_PASSES -#define NPCOMP_BASICPY_PASSES - -include "mlir/Pass/PassBase.td" - -//===----------------------------------------------------------------------===// -// TypeInference -//===----------------------------------------------------------------------===// - -def FunctionTypeInference : Pass<"basicpy-type-inference", "FuncOp"> { - let summary = "Performs function level type inference"; - let constructor = "mlir::NPCOMP::Basicpy::createFunctionTypeInferencePass()"; -} - -#endif // NPCOMP_BASICPY_PASSES diff --git a/include/npcomp/Dialect/CMakeLists.txt b/include/npcomp/Dialect/CMakeLists.txt index 017219dfa..826f348ce 100644 --- a/include/npcomp/Dialect/CMakeLists.txt +++ b/include/npcomp/Dialect/CMakeLists.txt @@ -1,5 +1,3 @@ -add_subdirectory(Basicpy) -add_subdirectory(Numpy) add_subdirectory(Refback) add_subdirectory(Refbackrt) add_subdirectory(TorchConversion) diff --git a/include/npcomp/Dialect/Numpy/CMakeLists.txt b/include/npcomp/Dialect/Numpy/CMakeLists.txt deleted file mode 100644 index 9f57627c3..000000000 --- a/include/npcomp/Dialect/Numpy/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(IR) -add_subdirectory(Transforms) diff --git a/include/npcomp/Dialect/Numpy/IR/CMakeLists.txt b/include/npcomp/Dialect/Numpy/IR/CMakeLists.txt deleted file mode 100644 index 36ad4acb9..000000000 --- a/include/npcomp/Dialect/Numpy/IR/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -add_mlir_dialect(NumpyOps numpy) -add_mlir_doc(NumpyDialect NumpyDialect Numpy/ -gen-dialect-doc) -add_mlir_doc(NumpyOps NumpyOps Numpy/ -gen-op-doc) diff --git a/include/npcomp/Dialect/Numpy/IR/NumpyDialect.h b/include/npcomp/Dialect/Numpy/IR/NumpyDialect.h deleted file mode 100644 index 43352f6ac..000000000 --- a/include/npcomp/Dialect/Numpy/IR/NumpyDialect.h +++ /dev/null @@ -1,68 +0,0 @@ -//===- NumpyDialect.h - Core numpy dialect ----------------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_NUMPY_IR_NUMPY_DIALECT_H -#define NPCOMP_DIALECT_NUMPY_IR_NUMPY_DIALECT_H - -#include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/Dialect.h" -#include "npcomp/Typing/Analysis/CPA/Interfaces.h" - -namespace mlir { -namespace NPCOMP { -namespace Numpy { - -namespace detail { -struct NdArrayTypeStorage; -} // namespace detail - -/// The singleton type representing an unknown dtype. 
-class AnyDtypeType : public Type::TypeBase { -public: - using Base::Base; - - static AnyDtypeType get(MLIRContext *context) { return Base::get(context); } -}; - -class NdArrayType - : public Type::TypeBase { -public: - using Base::Base; - - /// Constructs an NdArray with a dtype and no shape. Setting the dtype - /// to !basicpy.UnknownType will print as ?. - static NdArrayType get(Type dtype, - llvm::Optional> shape = llvm::None); - - /// Helper that gets an equivalent NdArrayType from a ShapedType. - static NdArrayType getFromShapedType(ShapedType shapedType); - - /// Returns whether the dtype is a concrete type (versus - /// !basicpy.UnknownType). - bool hasKnownDtype(); - Type getDtype(); - - /// If the shape has been partially specified, this will have a value. - /// unknown dimensions are -1. - llvm::Optional> getOptionalShape(); - - /// Converts to an equivalent TensorType. - TensorType toTensorType(); - - // CPA::TypeMapInterface methods. - Typing::CPA::TypeNode *mapToCPAType(Typing::CPA::Context &context); -}; - -} // namespace Numpy -} // namespace NPCOMP -} // namespace mlir - -#include "npcomp/Dialect/Numpy/IR/NumpyOpsDialect.h.inc" - -#endif // NPCOMP_DIALECT_NUMPY_IR_NUMPY_DIALECT_H diff --git a/include/npcomp/Dialect/Numpy/IR/NumpyDialect.td b/include/npcomp/Dialect/Numpy/IR/NumpyDialect.td deleted file mode 100644 index 80aa9527f..000000000 --- a/include/npcomp/Dialect/Numpy/IR/NumpyDialect.td +++ /dev/null @@ -1,118 +0,0 @@ -//===- NumpyDialect.td - Core numpy dialect ----------------*- tablegen -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_NUMPY_IR_NUMPY_DIALECT -#define NPCOMP_DIALECT_NUMPY_IR_NUMPY_DIALECT - -include "mlir/IR/OpBase.td" -include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.td" - -//===----------------------------------------------------------------------===// -// Dialect definition -//===----------------------------------------------------------------------===// - -def Numpy_Dialect : Dialect { - let name = "numpy"; - let summary = "Core numpy dialect"; - let description = [{ - Dialect of types and core numpy ops and abstractions. - }]; - let cppNamespace = "::mlir::NPCOMP::Numpy"; -} - -//===----------------------------------------------------------------------===// -// Op templates -//===----------------------------------------------------------------------===// - -class Numpy_Op traits = []> : - Op { - let parser = [{ return parse$cppClass(parser, &result); }]; - let printer = [{ return print$cppClass(p, *this); }]; -} - -//===----------------------------------------------------------------------===// -// Dialect types -//===----------------------------------------------------------------------===// - -def Numpy_AnyDtype : DialectType()">, "any dtype">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Numpy::AnyDtypeType>()"> { - let description = [{ - Placeholder for an unknown dtype in a tensor. - }]; -} - -def Numpy_NdArrayType : DialectType()">, "ndarray type">, - BuildableType<"$_builder.getType<::mlir::NPCOMP::Numpy::NdArrayType>()"> { - let description = [{ - NdArrayType: Models a numpy.ndarray and compatible types. - Unlike lower level representations, this type solely exists to represent - top-level semantics and source-dialect transformations. 
As such, it - is not a general modeling like `tensor` or `memref`, instead being just - enough to infer proper lowerings to those types. - - Like its numpy counterparts, NdArrayType represents a mutable array of - some value type (dtype), with a shape, strides, and various controls - around contiguity. Most of that is not modeled in this type, which focuses - on a representation sufficient to infer high level types and aliasing - based on program flow. - - Note that most operations in numpy can be legally defined similar to the - following: - %0 = ... -> !numpy.ndarray<...> - %1 = numpy.copy_to_tensor %0 -> tensor<...> - %2 = numpy.some_operation %1 - %4 = numpy.copy_from_tensor -> !numpy.ndarray<...> - - (in other words, the operation does not alias any of its operands to its - results) - - When this is the case, the operation will *only* be defined for tensors, - as staying in the value domain makes sense for as many operations as - can be reasonably represented as such. It is left to subsequent parts of - the compiler to transform the program in such a way as to elide the copies - that such sequences encode. - - Only ops that mutate or alias their operands should accept and/or produce - ndarray types. - }]; -} - -//===----------------------------------------------------------------------===// -// Type predicates -//===----------------------------------------------------------------------===// - -// Any tensor type legal for numpy ops. -def Numpy_AnyTensor : TensorOf<[AnyType]>; - -// Any type, at any stage of analysis that can represent a numpy array. -def Numpy_AnyArray : AnyTypeOf<[ - Numpy_AnyTensor, - Numpy_NdArrayType -]>; - -def Numpy_SliceTupleElement : AnyTypeOf<[ - // Supports both "Index Arrays" and "Boolean mask index arrays". - Numpy_AnyArray, - - // Indicates that an axis should be added (np.newaxis == None). - Basicpy_NoneType, - - // Indicates that intervening axes should be preserved. - Basicpy_EllipsisType, - - // A discrete numeric index (represented as IndexType so that a proper - // width can be target dependent). - Index, - - // A generalized slice object. - Basicpy_SliceSlotObjectType, -], "types that are legal elements of a __getitem__ tuple operating on arrays">; - -#endif // NPCOMP_DIALECT_NUMPY_IR_NUMPY_DIALECT diff --git a/include/npcomp/Dialect/Numpy/IR/NumpyOps.h b/include/npcomp/Dialect/Numpy/IR/NumpyOps.h deleted file mode 100644 index 701841143..000000000 --- a/include/npcomp/Dialect/Numpy/IR/NumpyOps.h +++ /dev/null @@ -1,25 +0,0 @@ -//===- NumpyOps.h - Core numpy dialect ops ----------------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
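The C++ helpers on `NdArrayType` shown a little above (`get`, `hasKnownDtype`, `getDtype`, `getOptionalShape`, `toTensorType`) make this dtype/shape refinement concrete. A small illustrative sketch, not from the npcomp sources, assuming the Numpy dialect is loaded; `inspectNdArray` is a hypothetical helper name.

```c++
// Illustrative sketch only.
#include "llvm/ADT/ArrayRef.h"
#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h"

using namespace mlir;
using namespace mlir::NPCOMP;

static void inspectNdArray(MLIRContext *context) {
  // An ndarray with dtype f32 and shape [2, ?]; -1 encodes an unknown dim.
  Type f32 = FloatType::getF32(context);
  int64_t shape[] = {2, -1};
  auto ndarray = Numpy::NdArrayType::get(f32, llvm::makeArrayRef(shape));

  Type dtype = ndarray.hasKnownDtype() ? ndarray.getDtype() : Type();
  TensorType asTensor = ndarray.toTensorType(); // value-domain equivalent
  (void)dtype;
  (void)asTensor;
}
```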
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_NUMPY_IR_NUMPY_OPS_H -#define NPCOMP_DIALECT_NUMPY_IR_NUMPY_OPS_H - -#include "mlir/IR/Attributes.h" -#include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/Dialect.h" -#include "mlir/IR/FunctionSupport.h" -#include "mlir/IR/OpDefinition.h" -#include "mlir/IR/SymbolTable.h" -#include "mlir/Interfaces/CastInterfaces.h" -#include "mlir/Interfaces/SideEffectInterfaces.h" -#include "npcomp/Typing/Analysis/CPA/Interfaces.h" - -#define GET_OP_CLASSES -#include "npcomp/Dialect/Numpy/IR/NumpyOps.h.inc" - -#endif // NPCOMP_DIALECT_NUMPY_IR_NUMPY_OPS_H diff --git a/include/npcomp/Dialect/Numpy/IR/NumpyOps.td b/include/npcomp/Dialect/Numpy/IR/NumpyOps.td deleted file mode 100644 index 7860e991d..000000000 --- a/include/npcomp/Dialect/Numpy/IR/NumpyOps.td +++ /dev/null @@ -1,271 +0,0 @@ -//===- NumpyOps.td - Core numpy dialect ops ----------------*- tablegen -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_NUMPY_IR_NUMPY_OPS -#define NPCOMP_DIALECT_NUMPY_IR_NUMPY_OPS - -include "npcomp/Dialect/Numpy/IR/NumpyDialect.td" -include "npcomp/Typing/Analysis/CPA/Interfaces.td" -include "mlir/Interfaces/SideEffectInterfaces.td" -include "mlir/Interfaces/CastInterfaces.td" -include "mlir/IR/SymbolInterfaces.td" - -//----------------------------------------------------------------------------// -// IR casting and conversions -//----------------------------------------------------------------------------// - -def Numpy_NarrowOp : Numpy_Op<"narrow", []> { - let summary = "Narrows an array to a known type at boundaries."; - let description = [{ - During tracing, specific data types are often unknown. This op generically - narrows from an unknown to a known data type at boundaries. - }]; - let arguments = (ins - Numpy_AnyArray:$operand - ); - let results = (outs - Numpy_AnyArray:$result - ); - let assemblyFormat = [{ - $operand attr-dict `:` functional-type($operand, $result) - }]; -} - -def Numpy_StaticInfoCastOp : Numpy_Op<"static_info_cast", [ - DeclareOpInterfaceMethods, - NoSideEffect]> { - let summary = "Adds/removes static information from an array type."; - let description = [{ - This op does not imply any runtime code. Semantically it is an identity - function. - }]; - let arguments = (ins - Numpy_AnyArray:$operand - ); - let results = (outs - Numpy_AnyArray:$result - ); - let assemblyFormat = [{ - $operand attr-dict `:` type($operand) `to` type($result) - }]; - let hasCanonicalizer = 1; -} - -def Numpy_TensorStaticInfoCastOp : Numpy_Op<"tensor_static_info_cast", [ - DeclareOpInterfaceMethods, - NoSideEffect]> { - let summary = "Adds/removes static information from a tensor type."; - let description = [{ - This op does not imply any runtime code. Semantically it is an identity - function. - - Unlike `tensor.cast`, this op allows changing dtype, following the - rules of numpy arrays where no runtime code is implied. In particular, - `!numpy.any_dtype` is compatible with all other element types, but otherwise - the element types must be the same. An element type of `!numpy.any_dtype` - represents the absence of static knowledge of the dtype. 
It does not - itself represent a concrete runtime element type. - }]; - let arguments = (ins - Numpy_AnyTensor:$operand - ); - let results = (outs - Numpy_AnyTensor:$result - ); - let assemblyFormat = [{ - $operand attr-dict `:` type($operand) `to` type($result) - }]; -} - -//----------------------------------------------------------------------------// -// NdArray type handling -//----------------------------------------------------------------------------// - -def Numpy_CreateArrayFromTensorOp : Numpy_Op<"create_array_from_tensor", [ - DeclareOpInterfaceMethods, - NoSideEffect]> { - let summary = "Creates an ndarray from a tensor."; - let description = [{ - Creates a new ndarray that will contain the data of the given tensor. - }]; - let arguments = (ins - Numpy_AnyTensor:$source - ); - let results = (outs - Numpy_AnyArray:$dest - ); - let assemblyFormat = [{ - $source attr-dict `:` functional-type($source, $dest) - }]; -} - -def Numpy_CopyToTensorOp : Numpy_Op<"copy_to_tensor", [ - DeclareOpInterfaceMethods]> { - let summary = "Copies an ndarray, yielding a value-typed tensor."; - let description = [{ - The semantics of this operation connote a copy of the data in the source - ndarray, producing a destination value that will have the value in the - ndarray at the point of the copy. Of course, downstream transformations - are free to rearrange things to elide the copy or otherwise eliminate the - need for it. - }]; - let arguments = (ins - Numpy_NdArrayType:$source - ); - let results = (outs - Numpy_AnyTensor:$dest - ); - let assemblyFormat = [{ - $source attr-dict `:` functional-type($source, $dest) - }]; - let hasCanonicalizer = 1; -} - -def Numpy_OverwriteArrayOp : Numpy_Op<"overwrite_array", []> { - let summary = "Ovewrite the contents of array with a tensor."; - let description = [{ - Replaces the contents of `array` with corresponding values from `tensor`. - - Immediately after this op has completed, indexing `array` will result - in identical values as indexing into `tensor`. Of course, later ops - might mutate `array`, so this relationship need not hold for the entire - program. - - This op has undefined behavior if the tensor and array have different - shapes or dtypes. - }]; - let arguments = (ins - Numpy_AnyTensor:$tensor, - Numpy_NdArrayType:$array - ); - let results = (outs - ); - let assemblyFormat = [{ - $tensor `overwrites` $array attr-dict `:` type($tensor) `,` type($array) - }]; -} - -//----------------------------------------------------------------------------// -// Universal function ops (ufunc) -// See: https://docs.scipy.org/doc/numpy/reference/ufuncs.html -//----------------------------------------------------------------------------// - -def Numpy_BuiltinUfuncCallOp : Numpy_Op<"builtin_ufunc_call", [ - DeclareOpInterfaceMethods]> { - let summary = "A __call__ operation on a named/builtin ufunc"; - let description = [{ - Simple ufunc call semantics for builtin ufuncs with none of the advanced - arguments specified. - - Note that without the `out=` parameter, ufunc call operations (unlike - others like `at`) are defined purely in the value domain and do not alias. - As such, they operate on tensors, not ndarray. 
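The value-domain design described above, where `create_array_from_tensor` and `copy_to_tensor` bracket tensor-only ops, is what lets later passes elide copies. The pattern below is purely an editorial illustration of that idea, not the canonicalization npcomp actually registered; the accessor names (`source()`, `dest()`) are assumed from the ODS argument/result names.

```c++
// Illustrative only: fold copy_to_tensor(create_array_from_tensor(%t)) -> %t.
#include "mlir/IR/PatternMatch.h"
#include "npcomp/Dialect/Numpy/IR/NumpyOps.h"

using namespace mlir;
using namespace mlir::NPCOMP;

struct ElideNdArrayRoundTrip : public OpRewritePattern<Numpy::CopyToTensorOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(Numpy::CopyToTensorOp op,
                                PatternRewriter &rewriter) const override {
    // Only fold when the ndarray was just created from a tensor of the same
    // type; any other producer may alias or carry different static info.
    auto create = op.source().getDefiningOp<Numpy::CreateArrayFromTensorOp>();
    if (!create || create.source().getType() != op.dest().getType())
      return failure();
    rewriter.replaceOp(op, create.source());
    return success();
  }
};
```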
- }]; - let arguments = (ins - StrAttr:$qualified_name, - Variadic:$inputs - ); - let results = (outs - Numpy_AnyTensor:$output - ); - let assemblyFormat = [{ - `<` $qualified_name `>` `(` operands `)` attr-dict `:` functional-type(operands, results) - }]; -} - -//----------------------------------------------------------------------------// -// Built-in array functions -// -// These are ops that mirror supported array functions in numpy or related -// libraries. Note that there is some evolution happening on the dispatch -// mechanism for these. -// See: https://numpy.org/neps/nep-0018-array-function-protocol.html -// See: https://numpy.org/neps/nep-0037-array-module.html -// -// Note that operators are in general free to take any arguments, but there -// are some conventions that are mirrored here: -// -// - `out` arguments indicate that the operation should perform a mutation -// of a specific array. This is not modeled at the individual op level, -// instead producing IR constructs to map the intent. -//----------------------------------------------------------------------------// - -def Numpy_DotOp : Numpy_Op<"dot", []> { - let summary = "Represents the `numpy.dot` operator"; - let description = [{ - See: https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html - }]; - let arguments = (ins - Numpy_AnyArray:$a, - Numpy_AnyArray:$b - ); - let results = (outs - Numpy_AnyArray:$output - ); - let assemblyFormat = [{ - operands attr-dict `:` functional-type(operands, $output) - }]; -} - -def Numpy_TransposeOp : Numpy_Op<"transpose", []> { - let summary = "Represents the `numpy.transpose` op with no permutation specified"; - let description = [{ - This op is equivalent to calling `numpy.transpose(arr)`, which reverses - the axes of the array. It is separate from the explicit form because it - is not always possible to locallly infer an appropriate axis transform - at the point of declaration. - - See: https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html - }]; - let arguments = (ins - Numpy_AnyArray:$a - ); - let results = (outs - Numpy_AnyArray:$output - ); - let assemblyFormat = [{ - operands attr-dict `:` functional-type(operands, $output) - }]; -} - -//----------------------------------------------------------------------------// -// Slicing -// See: https://docs.scipy.org/doc/numpy/user/basics.indexing.html -//----------------------------------------------------------------------------// - -def Numpy_GetSliceOp : Numpy_Op<"get_slice", []> { - let summary = "Gets a slice of an array"; - let description = [{ - This op encapsulates all forms of indexing into an array by taking a - variable number of `slice` arguments, each of which represents a single - entry in a generalized indexing-tuple. Once full type inference has - been performed, there should be sufficient static information to determine - the exact slice semantics solely by the signature of types of the `slice` - arguments. - - Note that there is a more general form of this op that is generally - needed for AST extraction that takes a variable length `tuple` instead - of a static list of arguments. It is expected that during type refinement - most such uses should degenerate to this static variant. - - Per numpy semantics, many forms of slice return a view instead of a copy, - and determining the exact form requires additional analysis. 
- }]; - let arguments = (ins - Numpy_AnyArray:$a, - Variadic:$slice_elements - ); - let results = (outs - Numpy_AnyArray:$result - ); - let assemblyFormat = [{ - operands attr-dict `:` functional-type(operands, $result) - }]; -} - -#endif // NPCOMP_DIALECT_NUMPY_IR_NUMPY_OPS diff --git a/include/npcomp/Dialect/Numpy/Transforms/CMakeLists.txt b/include/npcomp/Dialect/Numpy/Transforms/CMakeLists.txt deleted file mode 100644 index a2f1694a3..000000000 --- a/include/npcomp/Dialect/Numpy/Transforms/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -set(LLVM_TARGET_DEFINITIONS Passes.td) -mlir_tablegen(Passes.h.inc -gen-pass-decls) -add_public_tablegen_target(NPCOMPNumpyPassIncGen) - -add_mlir_doc(Passes NPCOMPNumpyTransforms ./ -gen-pass-doc) diff --git a/include/npcomp/Dialect/Numpy/Transforms/Passes.h b/include/npcomp/Dialect/Numpy/Transforms/Passes.h deleted file mode 100644 index a7dc9051f..000000000 --- a/include/npcomp/Dialect/Numpy/Transforms/Passes.h +++ /dev/null @@ -1,30 +0,0 @@ -//===------------------------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_NUMPY_TRANSFORMS_PASSES_H -#define NPCOMP_DIALECT_NUMPY_TRANSFORMS_PASSES_H - -#include "mlir/Pass/Pass.h" - -#include - -namespace mlir { -namespace NPCOMP { -namespace Numpy { - -std::unique_ptr> createPublicFunctionsToTensorPass(); - -} // namespace Numpy - -/// Registers all Numpy transformation passes. -void registerNumpyPasses(); - -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_DIALECT_NUMPY_TRANSFORMS_PASSES_H diff --git a/include/npcomp/Dialect/Numpy/Transforms/Passes.td b/include/npcomp/Dialect/Numpy/Transforms/Passes.td deleted file mode 100644 index ae41945f7..000000000 --- a/include/npcomp/Dialect/Numpy/Transforms/Passes.td +++ /dev/null @@ -1,23 +0,0 @@ -//===-- Passes.td - Pass definition file -------------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_NUMPY_PASSES -#define NPCOMP_NUMPY_PASSES - -include "mlir/Pass/PassBase.td" - -//===----------------------------------------------------------------------===// -// TypeInference -//===----------------------------------------------------------------------===// - -def NumpyPublicFunctionsToTensor : Pass<"numpy-public-functions-to-tensor", "ModuleOp"> { - let summary = "Converts public functions to operate on tensors (instead of ndarray)"; - let constructor = "mlir::NPCOMP::Numpy::createPublicFunctionsToTensorPass()"; -} - -#endif // NPCOMP_NUMPY_PASSES diff --git a/include/npcomp/Typing/Analysis/CMakeLists.txt b/include/npcomp/Typing/Analysis/CMakeLists.txt deleted file mode 100644 index fd2a3eae4..000000000 --- a/include/npcomp/Typing/Analysis/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory(CPA) diff --git a/include/npcomp/Typing/Analysis/CPA/Algorithm.h b/include/npcomp/Typing/Analysis/CPA/Algorithm.h deleted file mode 100644 index ab847b172..000000000 --- a/include/npcomp/Typing/Analysis/CPA/Algorithm.h +++ /dev/null @@ -1,91 +0,0 @@ -//===- Algorithm.h - Main algorithm ---------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// Support types and utilities for the Cartesian Product Algorithm for -// Type Inference. -// -// See: -// http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.8177 -// http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.129.2756 -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_ANALYSIS_CPA_ALGORITHM_H -#define NPCOMP_TYPING_ANALYSIS_CPA_ALGORITHM_H - -#include "npcomp/Typing/Analysis/CPA/Types.h" -#include "llvm/ADT/DenseSet.h" - -namespace mlir { -namespace NPCOMP { -namespace Typing { -namespace CPA { - -/// Propagates constraints in an environment. -class PropagationWorklist { -public: - PropagationWorklist(Environment &env); - - /// Propagates any current constraints that match the transitivity rule: - /// Ï„v <: t, t <: Ï„ (Ï„v=ValueType, t=TypeVar, Ï„=TypeBase) - /// Expanding to: - /// Ï„v <: Ï„ - /// (Ï„v=ValueType, t=TypeVar, Ï„=TypeBase) - void propagateTransitivity(); - - /// Commits the current round, returning true if any new constraints were - /// added. - bool commit(); - -private: - Environment &env; - llvm::DenseSet currentConstraints; - int newConstraintCount = 0; -}; - -/// Resolves all variables associated with a type node in a greedy fashion. -/// -/// Given a TypeNode that may or may not have dependent variables, this -/// will recursively resolve all variables to concrete types by applying -/// a hook that reduces members of the type variable set to a singular -/// ValueType. The default hook will only allow memberships of 1 and performs -/// no union widening. -/// -/// This greedy algorithm is fairly limited in what it can resolve. For example, -/// it cannot disambiguate two candidates like Array and Array in a -/// robust way. 
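For orientation while reviewing this removal: a minimal sketch, assuming the CPA headers deleted in this patch were still on the include path, of how the `GreedyTypeNodeVarResolver` declared just below was intended to be driven. The wrapper function and its name are illustrative only and do not come from the original tree.

```c++
#include "mlir/IR/Location.h"
#include "npcomp/Typing/Analysis/CPA/Algorithm.h"

using namespace mlir;
using namespace mlir::NPCOMP::Typing::CPA;

// Illustrative driver (not from the original sources): resolve every type
// variable `node` depends on, then rebuild a concrete IR type from it.
static Type resolveToIrType(Context &cpaContext, MLIRContext &mlirContext,
                            TypeNode *node, Location loc) {
  GreedyTypeNodeVarResolver resolver(cpaContext, mlirContext, loc);
  if (failed(resolver.analyzeTypeNode(node)))
    return Type(); // analyzeTypeNode already emitted a diagnostic.
  // With all dependent vars mapped, the node can construct its IR type.
  return node->constructIrType(cpaContext, resolver.getMappings(),
                               &mlirContext, loc);
}
```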
-class GreedyTypeNodeVarResolver { -public: - GreedyTypeNodeVarResolver(Context &context, MLIRContext &mlirContext, - llvm::Optional loc) - : context(context), loc(loc) {} - - /// Analyzes TypeNode, adding any necessary variable mappings. On failure, - /// an error will be emitted. - LogicalResult analyzeTypeNode(TypeNode *tn); - - /// The mappings for all TypeVars. If all calls to analyzeTypeNode() - /// succeeded, this should be sufficient to construct a concrete IR Type. - TypeVarMap &getMappings() { return mappings; } - -private: - ValueType *unionCandidateTypes(const ValueTypeSet &candidates); - - // Initialization. - Context &context; - llvm::Optional loc; - - // Runtime state. - TypeVarSet allVars; - TypeVarMap mappings; -}; - -} // namespace CPA -} // namespace Typing -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_TYPING_ANALYSIS_CPA_ALGORITHM_H diff --git a/include/npcomp/Typing/Analysis/CPA/CMakeLists.txt b/include/npcomp/Typing/Analysis/CPA/CMakeLists.txt deleted file mode 100644 index ca8045647..000000000 --- a/include/npcomp/Typing/Analysis/CPA/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -set(LLVM_TARGET_DEFINITIONS Interfaces.td) -mlir_tablegen(TypeInterfaces.h.inc -gen-type-interface-decls) -mlir_tablegen(TypeInterfaces.cpp.inc -gen-type-interface-defs) -mlir_tablegen(OpInterfaces.h.inc -gen-op-interface-decls) -mlir_tablegen(OpInterfaces.cpp.inc -gen-op-interface-defs) -add_public_tablegen_target(NPCOMPTypingCPAInterfacesIncGen) diff --git a/include/npcomp/Typing/Analysis/CPA/Interfaces.h b/include/npcomp/Typing/Analysis/CPA/Interfaces.h deleted file mode 100644 index a6fe7e812..000000000 --- a/include/npcomp/Typing/Analysis/CPA/Interfaces.h +++ /dev/null @@ -1,24 +0,0 @@ -//===- Interfaces.h - Interfaces for IR types -----------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_ANALYSIS_CPA_INTERFACES_H -#define NPCOMP_TYPING_ANALYSIS_CPA_INTERFACES_H - -#include "mlir/IR/OpDefinition.h" -#include "mlir/IR/Types.h" - -#include "npcomp/Typing/Analysis/CPA/Types.h" - -namespace mlir { - -#include "npcomp/Typing/Analysis/CPA/OpInterfaces.h.inc" -#include "npcomp/Typing/Analysis/CPA/TypeInterfaces.h.inc" - -} // namespace mlir - -#endif // NPCOMP_TYPING_ANALYSIS_CPA_INTERFACES_H diff --git a/include/npcomp/Typing/Analysis/CPA/Interfaces.td b/include/npcomp/Typing/Analysis/CPA/Interfaces.td deleted file mode 100644 index dd2608f0c..000000000 --- a/include/npcomp/Typing/Analysis/CPA/Interfaces.td +++ /dev/null @@ -1,38 +0,0 @@ -//===- CPAInterfaces.td - Interfaces to augment CPA --------*- tablegen -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_ANALYSIS_CPA_CPA_INTERFACES -#define NPCOMP_TYPING_ANALYSIS_CPA_CPA_INTERFACES - -include "mlir/IR/OpBase.td" - -// TODO: Check upstream: there doesn't seem to be a way to define interfaces -// outside of the ::mlir root namespace. 
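For context on the two interfaces defined next (`NPCOMPTypingTypeMapInterface` for types, `NPCOMPTypingCPATypeInferenceOpInterface` for ops), a hedged sketch of how a constraint-gathering walk could consume them; the walk function itself is hypothetical and not part of the removed sources.

```c++
#include "mlir/IR/Operation.h"
#include "npcomp/Typing/Analysis/CPA/Interfaces.h"

using namespace mlir;

// Hypothetical walk step: let ops add their own constraints; otherwise map
// result types into the CPA type lattice when they know how.
static void gatherConstraints(Operation *op,
                              NPCOMP::Typing::CPA::Context &context) {
  if (auto inferenceOp =
          dyn_cast<NPCOMPTypingCPATypeInferenceOpInterface>(op)) {
    inferenceOp.addCPAConstraints(context);
    return;
  }
  for (Type resultType : op->getResultTypes())
    if (auto mappable = resultType.dyn_cast<NPCOMPTypingTypeMapInterface>())
      (void)mappable.mapToCPAType(context);
}
```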
-def NPCOMP_TypingCPATypeMapInterface : TypeInterface< - "NPCOMPTypingTypeMapInterface"> { - let methods = [ - InterfaceMethod<"Maps an MLIR Type to a CPA::TypeNode.", - /*retTy=*/ "::mlir::NPCOMP::Typing::CPA::TypeNode *", - /*methodName=*/ "mapToCPAType", - /*args=*/ (ins - "::mlir::NPCOMP::Typing::CPA::Context &":$context)>, - ]; -} - -def NPCOMP_CPATypeInferenceOpInterface : OpInterface< - "NPCOMPTypingCPATypeInferenceOpInterface"> { - let methods = [ - InterfaceMethod<"Adds CPA constraints for the op.", - /*retTy=*/ "void", - /*methodName=*/ "addCPAConstraints", - /*args=*/ (ins - "::mlir::NPCOMP::Typing::CPA::Context &":$context)>, - ]; -} - -#endif // NPCOMP_TYPING_ANALYSIS_CPA_CPA_INTERFACES diff --git a/include/npcomp/Typing/Analysis/CPA/Types.h b/include/npcomp/Typing/Analysis/CPA/Types.h deleted file mode 100644 index b3a59f78d..000000000 --- a/include/npcomp/Typing/Analysis/CPA/Types.h +++ /dev/null @@ -1,636 +0,0 @@ -//===- CPASupport.h - Support types and utilities for CPA -----------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// Support types and utilities for the Cartesian Product Algorithm for -// Type Inference. -// -// See: -// http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.8177 -// http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.129.2756 -//===----------------------------------------------------------------------===// - -#include "mlir/IR/Types.h" -#include "mlir/IR/Value.h" -#include "mlir/Support/LogicalResult.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/DenseSet.h" -#include "llvm/ADT/MapVector.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/Support/Allocator.h" -#include "llvm/Support/Casting.h" -#include "llvm/Support/raw_ostream.h" - -#ifndef NPCOMP_TYPING_ANALYSIS_CPA_SUPPORT_H -#define NPCOMP_TYPING_ANALYSIS_CPA_SUPPORT_H - -namespace mlir { -namespace NPCOMP { -namespace Typing { -namespace CPA { - -class Context; -class TypeVarSet; -class TypeVarMap; - -/// A uniqued string identifier. -class Identifier { -public: - StringRef getValue() const { return value; } - - void print(raw_ostream &os, bool brief = false); - - friend raw_ostream &operator<<(raw_ostream &os, Identifier ident) { - ident.print(os); - return os; - } - -private: - Identifier(StringRef value) : value(value) {} - StringRef value; - friend class Context; -}; - -/// Base class for the CPA type hierarchy. -class ObjectBase { -public: - enum class Kind { - // Type - FIRST_TYPE, - TypeNode = FIRST_TYPE, - TypeVar, - CastType, - ReadType, - WriteType, - - // ValueType - FIRST_VALUE_TYPE, - ValueType = FIRST_VALUE_TYPE, - IRValueType, - ObjectValueType, - LAST_VALUE_TYPE = ObjectValueType, - - LAST_TYPE = TypeVar, - - // Constraint - Constraint, - }; - ObjectBase(Kind kind) : kind(kind) {} - virtual ~ObjectBase(); - - Kind getKind() const { return kind; } - - virtual void print(Context &context, raw_ostream &os, bool brief = false) = 0; - -private: - const Kind kind; -}; - -/// Base class for types. 
-/// This type hierarchy is adapted from section 2.1 of: -/// Precise Constraint-Based Type Inference for Java -/// -/// Referred to as: 'Ï„' (tau) -class TypeNode : public ObjectBase { -public: - TypeNode(Kind kind, unsigned hashValue) - : ObjectBase(kind), - hashValue(llvm::hash_combine(static_cast(kind), hashValue)) {} - static bool classof(const ObjectBase *tb) { - return tb->getKind() >= Kind::FIRST_TYPE && - tb->getKind() <= Kind::LAST_TYPE; - } - - /// Collects all type variables that are dependencies of this TypeNode. - virtual void collectDependentTypeVars(Context &context, TypeVarSet &typeVars); - - /// Constructs a corresponding IR type for this TypeNode. - /// Returns a null Type on error, optionally emitting an error if a Location - /// is provided. - /// Not all TypeNodes in all states can be converted back to an IR type. - virtual mlir::Type constructIrType(Context &context, - const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc = llvm::None); - - bool operator==(const TypeNode &that) const; - void print(Context &context, raw_ostream &os, bool brief = false) override; - - struct PtrInfo : llvm::DenseMapInfo { - static TypeNode *getEmptyKey() { - static TypeNode empty(Kind::TypeNode, 0); - return ∅ - } - static TypeNode *getTombstoneKey() { - static TypeNode tombstone(Kind::TypeNode, 1); - return &tombstone; - } - static unsigned getHashValue(TypeNode *key) { return key->hashValue; } - static bool isEqual(TypeNode *lhs, TypeNode *rhs) { - if (lhs->getKind() == Kind::TypeNode || - rhs->getKind() == Kind::TypeNode) { - // Base class is only created for special static values. - return lhs == rhs; - } - if (lhs == rhs) - return true; - return *lhs == *rhs; - } - }; - -private: - unsigned hashValue; - - friend struct PtrInfo; -}; - -/// A unique type variable. -/// Both the pointer and the ordinal will be unique within a context. -/// Referred to as 't' -class TypeVar : public TypeNode { -public: - static bool classof(const ObjectBase *tb) { - return tb->getKind() == Kind::TypeVar; - } - - int getOrdinal() const { return ordinal; } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - - void collectDependentTypeVars(Context &context, - TypeVarSet &typeVars) override; - mlir::Type - constructIrType(Context &context, const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc = llvm::None) override; - -private: - TypeVar(int ordinal) - : TypeNode(Kind::TypeVar, llvm::hash_code(ordinal)), ordinal(ordinal) {} - int ordinal; - friend class Context; -}; - -/// A type-cast type. -/// Referred to as: 'cast(δ, t)' -class CastType : public TypeNode { -public: - static bool classof(const ObjectBase *tb) { - return tb->getKind() == Kind::CastType; - } - - Identifier *getTypeIdentifier() const { return typeIdentifier; } - TypeVar *getTypeVar() const { return typeVar; } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - -private: - CastType(Identifier *typeIdentifier, TypeVar *typeVar) - : TypeNode(Kind::CastType, llvm::hash_combine(typeIdentifier, typeVar)), - typeIdentifier(typeIdentifier), typeVar(typeVar) {} - Identifier *typeIdentifier; - TypeVar *typeVar; - friend class Context; -}; - -/// Type representing a read-field operation. 
-/// Referred to as: 'read Ï„' -class ReadType : public TypeNode { -public: - static bool classof(const ObjectBase *tb) { - return tb->getKind() == Kind::ReadType; - } - - TypeNode *getType() const { return type; } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - -private: - ReadType(TypeNode *type) - : TypeNode(Kind::ReadType, llvm::hash_combine(type)), type(type) {} - TypeNode *type; - friend class Context; -}; - -/// Type representing a read-field operation. -/// Referred to as: 'read Ï„' -class WriteType : public TypeNode { -public: - static bool classof(const ObjectBase *tb) { - return tb->getKind() == Kind::WriteType; - } - - TypeNode *getType() const { return type; } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - -private: - WriteType(TypeNode *type) - : TypeNode(Kind::WriteType, llvm::hash_combine(type)), type(type) {} - TypeNode *type; - friend class Context; -}; - -/// A legal value type in the language. We represent this as one of: -/// IRValueType: Wraps a primitive MLIR type -/// ObjectValueType: Defines an object. -/// Referred to as 'Ï„v' (tau-v) -class ValueType : public TypeNode { -public: - using TypeNode::TypeNode; - static bool classof(const ObjectBase *ob) { - return ob->getKind() >= Kind::FIRST_VALUE_TYPE && - ob->getKind() <= Kind::LAST_VALUE_TYPE; - } -}; - -/// Concrete ValueType that wraps an MLIR Type. -class IRValueType : public ValueType { -public: - IRValueType(mlir::Type irType) - : ValueType(Kind::IRValueType, llvm::hash_combine(irType)), - irType(irType) {} - static bool classof(const ObjectBase *ob) { - return ob->getKind() == Kind::IRValueType; - } - - mlir::Type getIrType() const { return irType; } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - mlir::Type - constructIrType(Context &context, const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc = llvm::None) override; - -private: - const mlir::Type irType; -}; - -/// ValueType for an object. -/// Referred to as 'obj(δ, [ li : Ï„i ])' -class ObjectValueType : public ValueType { -public: - /// Constructs a corresponding IR type given a list of resolved field types. - using IrTypeConstructor = - std::function, - MLIRContext *, llvm::Optional)>; - - static bool classof(const ObjectBase *ob) { - return ob->getKind() == Kind::ObjectValueType; - } - - Identifier *getTypeIdentifier() { return typeIdentifier; } - size_t getFieldCount() { return fieldCount; } - llvm::ArrayRef getFieldIdentifiers() { - return llvm::ArrayRef(fieldIdentifiers, fieldCount); - } - llvm::ArrayRef getFieldTypes() { - return llvm::ArrayRef(fieldTypes, fieldCount); - } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - void collectDependentTypeVars(Context &context, - TypeVarSet &typeVars) override; - mlir::Type - constructIrType(Context &context, const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc = llvm::None) override; - -private: - ObjectValueType(IrTypeConstructor irCtor, Identifier *typeIdentifier, - size_t fieldCount, Identifier *const *fieldIdentifiers, - TypeNode *const *fieldTypes) - // TODO: Real hashcode. 
- : ValueType(Kind::ObjectValueType, 0), irCtor(std::move(irCtor)), - typeIdentifier(typeIdentifier), fieldCount(fieldCount), - fieldIdentifiers(fieldIdentifiers), fieldTypes(fieldTypes) {} - IrTypeConstructor irCtor; - Identifier *typeIdentifier; - size_t fieldCount; - Identifier *const *fieldIdentifiers; - TypeNode *const *fieldTypes; - friend class Context; -}; - -/// A Constraint between two types. -/// Referred to as: 'Ï„1 <: Ï„2' -class Constraint : public ObjectBase { -public: - static bool classof(ObjectBase *ob) { - return ob->getKind() == Kind::Constraint; - } - - TypeNode *getFrom() { return from; } - TypeNode *getTo() { return to; } - - void print(Context &context, raw_ostream &os, bool brief = false) override; - - bool operator==(const Constraint &that) const { - return from == that.from && to == that.to; - } - - struct PtrInfo : llvm::DenseMapInfo { - static Constraint *getEmptyKey() { - auto emptyType = TypeNode::PtrInfo::getEmptyKey(); - static Constraint empty(emptyType, emptyType); - return ∅ - } - static Constraint *getTombstoneKey() { - auto tombstoneType = TypeNode::PtrInfo::getTombstoneKey(); - static Constraint tombstone(tombstoneType, tombstoneType); - return &tombstone; - } - static unsigned getHashValue(Constraint *key) { - return llvm::hash_combine(key->from, key->to); - } - static bool isEqual(Constraint *lhs, Constraint *rhs) { - return *lhs == *rhs; - } - }; - -private: - Constraint(TypeNode *from, TypeNode *to) - : ObjectBase(Kind::Constraint), from(from), to(to) {} - TypeNode *from; - TypeNode *to; - friend class Context; -}; - -/// A set of constraints. -/// Referred to as: 'C' -class ConstraintSet : public llvm::SmallPtrSet { -public: - static const ConstraintSet &getEmptySet(); - using SmallPtrSet::SmallPtrSet; - void print(Context &context, raw_ostream &os, bool brief = false); -}; - -/// A set of TypeVar. -/// Referred to as 't_bar' -class TypeVarSet : public llvm::SmallPtrSet { -public: - static const TypeVarSet &getEmptySet(); - using SmallPtrSet::SmallPtrSet; - void print(Context &context, raw_ostream &os, bool brief = false); -}; - -/// A small mapping of TypeVar -> TypeNode. -class TypeVarMap : public llvm::SmallMapVector {}; - -/// Set for managing TypeNodes. -class TypeNodeSet : public llvm::SmallPtrSet { -public: - static const TypeNodeSet &getEmptySet(); - using SmallPtrSet::SmallPtrSet; -}; - -/// Set for managing ValueTypes associated with a TypeVar. -class ValueTypeSet : public llvm::SmallPtrSet { -public: - static const ValueTypeSet &getEmptySet(); - using SmallPtrSet::SmallPtrSet; -}; - -/// Represents an evaluation scope (i.e. a "countour" in the literature) that -/// tracks type variables, IR associations and constraints. -class Environment { -public: - using ValueTypeNodeMap = llvm::DenseMap; - - Environment(Context &context); - - Context &getContext() { return context; } - ConstraintSet &getConstraints() { return constraints; } - TypeVarSet &getTypeVars() { return typeVars; } - - /// Maps an IR value to a CPA type by applying an IR Type -> CPA Type - /// transfer function if not already mapped. - TypeNode *mapValueToType(Value value); - - /// The current mapping of IR Value to TypeNode. - const ValueTypeNodeMap &getValueTypeMap() { return valueTypeMap; } - -private: - Context &context; - ConstraintSet constraints; - TypeVarSet typeVars; - ValueTypeNodeMap valueTypeMap; -}; - -/// Manages instances and containers needed for the lifetime of a CPA -/// analysis. 
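Ahead of the `Context` class that follows, a small usage sketch (the wrapper function is hypothetical, but every call on `Context` is taken from the class as defined below) showing the allocation and uniquing entry points it exposes.

```c++
#include "mlir/IR/BuiltinTypes.h"
#include "npcomp/Typing/Analysis/CPA/Types.h"

using namespace mlir;
using namespace mlir::NPCOMP::Typing::CPA;

// Introduce a fresh type variable and constrain it from below by i32,
// i.e. record the subtyping fact "i32 <: tv".
static void constrainVarToI32(Context &context, MLIRContext &mlirContext) {
  TypeVar *tv = context.newTypeVar(); // non-uniqued by construction
  IRValueType *i32 =
      context.getIRValueType(IntegerType::get(&mlirContext, 32));
  context.getConstraint(i32, tv); // uniqued; also feeds constraint propagation
}
```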
-class Context { -public: - /// Hook for customizing default IR Type -> TypeNode conversion. - /// This is run if more specific conversions fail. - using IrTypeMapHook = - std::function; - - Context(IrTypeMapHook irTypeMapHook = nullptr); - - /// Gets the current environment (roughly call scope). - Environment &getCurrentEnvironment() { return *currentEnvironment; } - - /// Maps an IR Type to a CPA TypeNode. - /// This is currently not overridable but a hook may need to be provided - /// eventually. - TypeNode *mapIrType(::mlir::Type irType); - - // Create a new (non-uniqued) type var. These are not uniqued because by - // construction, we only ever ask for new type variables when needed. - TypeVar *newTypeVar() { - TypeVar *tv = allocator.Allocate(1); - new (tv) TypeVar(++typeVarCounter); - currentEnvironment->getTypeVars().insert(tv); - return tv; - } - - /// Gets a uniqued Identifier for the given value. - Identifier *getIdentifier(StringRef value) { - auto it = identifierMap.find(value); - if (it != identifierMap.end()) - return it->second; - auto *chars = allocator.Allocate(value.size()); - std::memcpy(chars, value.data(), value.size()); - StringRef uniquedValue(chars, value.size()); - Identifier *id = allocator.Allocate(1); - new (id) Identifier(uniquedValue); - identifierMap[uniquedValue] = id; - return id; - } - - /// Gets a uniqued IRValueType for the IR Type. - IRValueType *getIRValueType(Type irType) { - return getUniquedTypeNode(irType); - } - - /// Creates a new ObjectValueType. - /// Object value types are not uniqued. - ObjectValueType * - newObjectValueType(ObjectValueType::IrTypeConstructor irCtor, - Identifier *typeIdentifier, - llvm::ArrayRef fieldIdentifiers, - llvm::ArrayRef fieldTypes) { - assert(fieldIdentifiers.size() == fieldTypes.size()); - size_t n = fieldIdentifiers.size(); - - Identifier **allocFieldIdentifiers = allocator.Allocate(n); - std::copy_n(fieldIdentifiers.begin(), n, allocFieldIdentifiers); - TypeNode **allocFieldTypes = allocator.Allocate(n); - std::copy_n(fieldTypes.begin(), n, allocFieldTypes); - auto *ovt = allocator.Allocate(1); - new (ovt) ObjectValueType(irCtor, typeIdentifier, n, allocFieldIdentifiers, - allocFieldTypes); - return ovt; - } - - /// Gets a CastType. - CastType *getCastType(Identifier *typeIdentifier, TypeVar *typeVar) { - return getUniquedTypeNode(typeIdentifier, typeVar); - } - - /// Gets a ReadType. - ReadType *getReadType(TypeNode *type) { - return getUniquedTypeNode(type); - } - - /// Gets a WriteType. - WriteType *getWriteType(TypeNode *type) { - return getUniquedTypeNode(type); - } - - /// Creates a Constraint. - Constraint *getConstraint(TypeNode *t1, TypeNode *t2) { - // Lookup based on a stack allocated key. - Constraint v(t1, t2); - auto it = constraintUniquer.insert(&v); - if (!it.second) - return *it.first; - - auto *av = allocator.Allocate(1); - new (av) Constraint(v); // Copy ctor - *it.first = av; // Replace key pointer with durable allocation. - addConstraintToGraph(av); - currentEnvironment->getConstraints().insert(av); - return av; - } - - /// Creates a new ConstraintSet. - ConstraintSet *newConstraintSet() { - auto *cs = allocator.Allocate(1); - new (cs) ConstraintSet(); - return cs; - } - - /// Creates a new TypeVarSet. - TypeVarSet *newTypeVarSet() { - auto *tvs = allocator.Allocate(1); - new (tvs) TypeVarSet(); - return tvs; - } - - /// Gets a reference to the current members. - /// This is the actual backing set. Any modification to the graph can - /// invalidate iterators. 
- const ValueTypeSet &getMembers(TypeNode *node) { - return typeNodeMembers[node]; - } - -private: - /// Generically creates a uniquable TypeNode subclass. - template - ConcreteTy *getUniquedTypeNode(Args &&... args) { - // Lookup based on stack allocated key. - ConcreteTy v(std::forward(args)...); - auto it = typeUniquer.insert(&v); - if (!it.second) { - return static_cast(*it.first); - } - - auto *av = allocator.Allocate(1); - new (av) ConcreteTy(v); // Copy ctor - *it.first = av; // Replace key pointer with durable allocation. - return av; - } - - /// Adds a constraint to the graph structure. - void addConstraintToGraph(Constraint *c); - - /// Propagates any pending constraints. - void propagateConstraints(); - - // Configuration. - IrTypeMapHook irTypeMapHook; - - // Allocation/uniquing management. - llvm::BumpPtrAllocator allocator; - llvm::DenseMap identifierMap; - llvm::DenseSet typeUniquer; - llvm::DenseSet constraintUniquer; - int typeVarCounter = 0; - - // Graph management. - llvm::DenseMap fwdNodeToConstraintMap; - llvm::DenseMap fwdConstraintToNodeMap; - llvm::DenseMap bakNodeToConstraintMap; - // Note that we track contents for all TypeNodes, not just vars, as this - // can be used to determine illegal dataflows. - llvm::DenseMap typeNodeMembers; - - // Propagation worklist. - /// Constraints that are pending propagation. - ConstraintSet pendingConstraints; - ConstraintSet pendingConstraintWorklist; - - // Environment management. - std::vector> environmentStack; - Environment *currentEnvironment; -}; - -inline bool TypeNode::operator==(const TypeNode &that) const { - if (getKind() != that.getKind()) - return false; - switch (getKind()) { - case Kind::TypeVar: { - auto &thisCast = static_cast(*this); - auto &thatCast = static_cast(that); - return thisCast.getOrdinal() == thatCast.getOrdinal(); - } - case Kind::CastType: { - auto &thisCast = static_cast(*this); - auto &thatCast = static_cast(that); - return thisCast.getTypeIdentifier() == thatCast.getTypeIdentifier() && - thisCast.getTypeVar() == thatCast.getTypeVar(); - } - case Kind::ReadType: { - auto &thisCast = static_cast(*this); - auto &thatCast = static_cast(that); - return thisCast.getType() == thatCast.getType(); - } - case Kind::WriteType: { - auto &thisCast = static_cast(*this); - auto &thatCast = static_cast(that); - return thisCast.getType() == thatCast.getType(); - } - case Kind::IRValueType: { - auto &thisCast = static_cast(*this); - auto &thatCast = static_cast(that); - return thisCast.getIrType() == thatCast.getIrType(); - } - case Kind::ObjectValueType: - llvm_unreachable("ObjectValueType not implemented"); - default: - llvm_unreachable("unhandled TypeNode subclass"); - } - return false; -} - -} // namespace CPA -} // namespace Typing -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_TYPING_CPA_SUPPORT_H diff --git a/include/npcomp/Typing/CMakeLists.txt b/include/npcomp/Typing/CMakeLists.txt deleted file mode 100644 index 3c6e75035..000000000 --- a/include/npcomp/Typing/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(Analysis) -add_subdirectory(Transforms) diff --git a/include/npcomp/Typing/Support/CPAIrHelpers.h b/include/npcomp/Typing/Support/CPAIrHelpers.h deleted file mode 100644 index 6dd2ac509..000000000 --- a/include/npcomp/Typing/Support/CPAIrHelpers.h +++ /dev/null @@ -1,47 +0,0 @@ -//===- IrHelpers.h - Helpers for bridging analysis and IR types -----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_SUPPORT_CPA_IR_HELPERS_H -#define NPCOMP_TYPING_SUPPORT_CPA_IR_HELPERS_H - -#include "mlir/IR/BuiltinTypes.h" -#include "npcomp/Typing/Analysis/CPA/Types.h" - -namespace mlir { -namespace NPCOMP { -namespace Typing { -namespace CPA { - -/// Creates an array object type with a possibly unknown element type. -/// By convention, arrays have a single type slot for the element type -/// named 'e'. -ObjectValueType *newArrayType(Context &context, - ObjectValueType::IrTypeConstructor irCtor, - Identifier *typeIdentifier, - llvm::Optional elementType); - -/// Gets the TypeNode associated with the element type for an array allocated -/// via newArrayType. -TypeNode *getArrayElementType(ObjectValueType *arrayType); - -/// Creates an ObjectValueType for the given TensorType. The result will -/// reconstruct the original TensorType's structure but with the resolved -/// element type. -ObjectValueType *createTensorLikeArrayType(Context &context, - TensorType tensorType); - -/// Creates a default IR type map hook which supports built-in MLIR types -/// that do not implement the analysis interfaces. -Context::IrTypeMapHook createDefaultTypeMapHook(); - -} // namespace CPA -} // namespace Typing -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_TYPING_SUPPORT_CPA_IR_HELPERS_H diff --git a/include/npcomp/Typing/Transforms/CMakeLists.txt b/include/npcomp/Typing/Transforms/CMakeLists.txt deleted file mode 100644 index b50381efb..000000000 --- a/include/npcomp/Typing/Transforms/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -set(LLVM_TARGET_DEFINITIONS Passes.td) -mlir_tablegen(Passes.h.inc -gen-pass-decls) -add_public_tablegen_target(NPCOMPTypingTransformsPassIncGen) - -#add_mlir_doc(Passes Transforms ./ -gen-pass-doc) diff --git a/include/npcomp/Typing/Transforms/Passes.h b/include/npcomp/Typing/Transforms/Passes.h deleted file mode 100644 index 61b38cbf1..000000000 --- a/include/npcomp/Typing/Transforms/Passes.h +++ /dev/null @@ -1,30 +0,0 @@ -//===------------------------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_TRANSFORMS_PASSES_H -#define NPCOMP_TYPING_TRANSFORMS_PASSES_H - -#include "mlir/Pass/Pass.h" - -#include - -namespace mlir { -namespace NPCOMP { -namespace Typing { - -std::unique_ptr> createCPAFunctionTypeInferencePass(); - -} // namespace Typing - -/// Registers all typing passes. -void registerTypingPasses(); - -} // namespace NPCOMP -} // namespace mlir - -#endif // NPCOMP_TYPING_TRANSFORMS_PASSES_H diff --git a/include/npcomp/Typing/Transforms/Passes.td b/include/npcomp/Typing/Transforms/Passes.td deleted file mode 100644 index d90a4be76..000000000 --- a/include/npcomp/Typing/Transforms/Passes.td +++ /dev/null @@ -1,23 +0,0 @@ -//===-- Passes.td - Pass definition file -------------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_TRANSFORMS_PASSES -#define NPCOMP_TYPING_TRANSFORMS_PASSES - -include "mlir/Pass/PassBase.td" - -//===----------------------------------------------------------------------===// -// TypeInference -//===----------------------------------------------------------------------===// - -def CPAFunctionTypeInference : Pass<"npcomp-cpa-type-inference", "FuncOp"> { - let summary = "Performs CPA function level type inference"; - let constructor = "mlir::NPCOMP::Typing::createCPAFunctionTypeInferencePass()"; -} - -#endif // NPCOMP_TYPING_TRANSFORMS_PASSES diff --git a/lib/CAPI/BasicpyTypes.cpp b/lib/CAPI/BasicpyTypes.cpp deleted file mode 100644 index cdadcfc75..000000000 --- a/lib/CAPI/BasicpyTypes.cpp +++ /dev/null @@ -1,107 +0,0 @@ -//===- BasicpyTypes.cpp - C Interface for basicpy types -------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp-c/BasicpyTypes.h" - -#include "mlir/CAPI/IR.h" -#include "mlir/CAPI/Support.h" -#include "mlir/IR/BuiltinTypes.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" - -using namespace mlir; -using namespace mlir::NPCOMP; - -//===----------------------------------------------------------------------===// -// Bool type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsABasicpyBool(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompBasicpyBoolTypeGet(MlirContext context) { - return wrap(Basicpy::BoolType::get(unwrap(context))); -} - -//===----------------------------------------------------------------------===// -// Bytes type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsABasicpyBytes(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompBasicpyBytesTypeGet(MlirContext context) { - return wrap(Basicpy::BytesType::get(unwrap(context))); -} - -//===----------------------------------------------------------------------===// -// Dict type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsABasicpyDict(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompBasicpyDictTypeGet(MlirContext context) { - return wrap(Basicpy::DictType::get(unwrap(context))); -} - -//===----------------------------------------------------------------------===// -// List type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsABasicpyList(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompBasicpyListTypeGet(MlirContext context) { - return wrap(Basicpy::ListType::get(unwrap(context))); -} - -//===----------------------------------------------------------------------===// -// !basicpy.NoneType type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsANone(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompBasicpyNoneTypeGet(MlirContext context) { - return wrap(Basicpy::NoneType::get(unwrap(context))); -} - -//===----------------------------------------------------------------------===// -// SlotObject type. 
-//===----------------------------------------------------------------------===// - -MlirType npcompBasicPySlotObjectTypeGet(MlirContext context, - MlirStringRef className, - intptr_t slotTypeCount, - const MlirType *slotTypes) { - MLIRContext *cppContext = unwrap(context); - auto classNameAttr = StringAttr::get(cppContext, unwrap(className)); - SmallVector slotTypesCpp; - slotTypesCpp.resize(slotTypeCount); - for (intptr_t i = 0; i < slotTypeCount; ++i) { - slotTypesCpp[i] = unwrap(slotTypes[i]); - } - return wrap(Basicpy::SlotObjectType::get(classNameAttr, slotTypesCpp)); -} - -//===----------------------------------------------------------------------===// -// Tuple type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsABasicpyTuple(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompBasicpyTupleTypeGet(MlirContext context) { - return wrap(Basicpy::TupleType::get(unwrap(context))); -} diff --git a/lib/CAPI/CMakeLists.txt b/lib/CAPI/CMakeLists.txt index d4a017a3e..2705d71e5 100644 --- a/lib/CAPI/CMakeLists.txt +++ b/lib/CAPI/CMakeLists.txt @@ -8,16 +8,12 @@ add_npcomp_library(NPCOMPCAPI InitLLVM.cpp RefJITBackend.cpp Registration.cpp - BasicpyTypes.cpp - NumpyTypes.cpp LINK_LIBS PUBLIC MLIRExecutionEngine MLIRLLVMIR MLIRTargetLLVMIRExport NPCOMPInitAll - NPCOMPBasicpyDialect - NPCOMPNumpyDialect NPCOMPRefBackendJITHelpers NPCOMPRuntime TorchMLIRTorchDialect diff --git a/lib/CAPI/NumpyTypes.cpp b/lib/CAPI/NumpyTypes.cpp deleted file mode 100644 index a5b811dca..000000000 --- a/lib/CAPI/NumpyTypes.cpp +++ /dev/null @@ -1,56 +0,0 @@ -//===- NumpyTypes.cpp - C Interface for numpy types -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp-c/NumpyTypes.h" - -#include "mlir/CAPI/IR.h" -#include "mlir/CAPI/Support.h" -#include "mlir/IR/BuiltinTypes.h" -#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h" - -using namespace mlir; -using namespace mlir::NPCOMP; - -//===----------------------------------------------------------------------===// -// !numpy.any_dtype type. -//===----------------------------------------------------------------------===// - -bool npcompTypeIsANumpyAnyDtype(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompAnyDtypeTypeGet(MlirContext context) { - return wrap(Numpy::AnyDtypeType::get(unwrap(context))); -} - -//===----------------------------------------------------------------------===// -// NDArray type. 
-//===----------------------------------------------------------------------===// - -bool npcompTypeIsANumpyNdArray(MlirType t) { - return unwrap(t).isa(); -} - -MlirType npcompNumpyNdArrayTypeGetUnranked(MlirType elementType) { - return wrap(Numpy::NdArrayType::get(unwrap(elementType))); -} - -MlirType npcompNumpyNdArrayTypeGetRanked(intptr_t rank, const int64_t *shape, - MlirType elementType) { - llvm::ArrayRef shapeArray(shape, rank); - return wrap(Numpy::NdArrayType::get(unwrap(elementType), shapeArray)); -} - -MlirType npcompNumpyNdArrayTypeGetFromShaped(MlirType shapedType) { - return wrap(Numpy::NdArrayType::getFromShapedType( - unwrap(shapedType).cast())); -} - -MlirType npcompNumpyNdArrayTypeToTensor(MlirType ndarrayType) { - return wrap(unwrap(ndarrayType).cast().toTensorType()); -} diff --git a/lib/CAPI/Registration.cpp b/lib/CAPI/Registration.cpp index a48941895..4f5ff89ad 100644 --- a/lib/CAPI/Registration.cpp +++ b/lib/CAPI/Registration.cpp @@ -18,6 +18,7 @@ void npcompRegisterAllDialects(MlirContext context) { mlir::DialectRegistry registry; mlir::NPCOMP::registerAllDialects(registry); + mlir::torch::registerAllDialects(registry); unwrap(context)->appendDialectRegistry(registry); // TODO: Don't eagerly load once D88162 is in and clients can do this. unwrap(context)->loadAllAvailableDialects(); diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 43fba90eb..5a0d03ad4 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -4,7 +4,6 @@ add_subdirectory(Conversion) add_subdirectory(Dialect) add_subdirectory(Interfaces) add_subdirectory(RefBackend) -add_subdirectory(Typing) ################################################################################ # Setup the initialization target. @@ -34,12 +33,7 @@ add_npcomp_library(NPCOMPInitAll TorchMLIRTorchDialect NPCOMPTorchConversionDialect NPCOMPRefbackrtDialect - NPCOMPBasicpyDialect - NPCOMPBasicpyPasses NPCOMPConversionPasses - NPCOMPNumpyDialect - NPCOMPNumpyPasses - NPCOMPTypingPasses IREEDialectsIREEDialect # TODO: We shouldn't need npcomp_conversion_libs here, but we have diff --git a/lib/Conversion/BasicpyToStd/CMakeLists.txt b/lib/Conversion/BasicpyToStd/CMakeLists.txt deleted file mode 100644 index 8ee00d252..000000000 --- a/lib/Conversion/BasicpyToStd/CMakeLists.txt +++ /dev/null @@ -1,16 +0,0 @@ -add_npcomp_conversion_library(NPCOMPBasicpyToSTD - Passes.cpp - PrimitiveOpsConversion.cpp - - DEPENDS - NPCOMPConversionPassIncGen - - LINK_COMPONENTS - Core - - LINK_LIBS PUBLIC - MLIRIR - MLIRPass - MLIRTransforms - NPCOMPBasicpyDialect -) diff --git a/lib/Conversion/BasicpyToStd/Passes.cpp b/lib/Conversion/BasicpyToStd/Passes.cpp deleted file mode 100644 index 4e8ac3527..000000000 --- a/lib/Conversion/BasicpyToStd/Passes.cpp +++ /dev/null @@ -1,42 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Conversion/BasicpyToStd/Passes.h" -#include "npcomp/Conversion/BasicpyToStd/Patterns.h" - -#include "../PassDetail.h" -#include "mlir/Dialect/Traits.h" -#include "mlir/Transforms/GreedyPatternRewriteDriver.h" - -using namespace mlir; -using namespace mlir::NPCOMP; - -namespace { - -class ConvertBasicpyToStd - : public ConvertBasicpyToStdBase { -public: - void runOnOperation() override { - FuncOp func = getOperation(); - (void)applyPatternsAndFoldGreedily(func, getPatterns()); - } - - FrozenRewritePatternSet getPatterns() { - auto *context = &getContext(); - RewritePatternSet patterns(context); - populateBasicpyToStdPrimitiveOpPatterns(patterns); - return std::move(patterns); - } -}; - -} // namespace - -std::unique_ptr> -mlir::NPCOMP::createConvertBasicpyToStdPass() { - return std::make_unique(); -} diff --git a/lib/Conversion/BasicpyToStd/PrimitiveOpsConversion.cpp b/lib/Conversion/BasicpyToStd/PrimitiveOpsConversion.cpp deleted file mode 100644 index 0cbd2715d..000000000 --- a/lib/Conversion/BasicpyToStd/PrimitiveOpsConversion.cpp +++ /dev/null @@ -1,250 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/IR/PatternMatch.h" -#include "npcomp/Conversion/BasicpyToStd/Patterns.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h" - -using namespace mlir; -using namespace mlir::NPCOMP; - -namespace { - -bool isLegalBinaryOpType(Type type) { - if (type.isIntOrFloat()) { - return type.getIntOrFloatBitWidth() > 1; // Do not match i1 - } - - return false; -} - -// Convert to std ops when all types match. It is assumed that additional -// patterns and type inference are used to get into this form. -class NumericBinaryExpr : public OpRewritePattern { -public: - using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(Basicpy::BinaryExprOp op, - PatternRewriter &rewriter) const override { - // Match failure unless if both: - // a) operands/results are the same type - // b) matches a set of supported primitive types - // c) the operation maps to a simple std op without further massaging - auto valueType = op.left().getType(); - if (valueType != op.right().getType() || valueType != op.result().getType()) - return failure(); - if (!isLegalBinaryOpType(valueType)) - return failure(); - - auto operation = Basicpy::symbolizeBinaryOperation(op.operation()); - if (!operation) - return failure(); - auto left = op.left(); - auto right = op.right(); - - // Generally, int and float ops in std are different. - using Basicpy::BinaryOperation; - if (valueType.isa()) { - // Note that not all operations make sense or are defined for integer - // math. Of specific note is the Div vs FloorDiv distinction. 
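The note above (and the TODO on the `FloorDiv` case in the switch below) points at the rounding mismatch between a truncating `SignedDivIOp` and Python's floor division; a tiny standalone C++ illustration, not part of the patch, shows the difference on negative operands.

```c++
#include <cassert>
#include <cmath>

int main() {
  // C++ (like SignedDivIOp) truncates toward zero; Python's // floors.
  int truncated = -7 / 2;                                 // == -3
  int floored = static_cast<int>(std::floor(-7.0 / 2.0)); // == -4
  assert(truncated == -3 && floored == -4);
  return 0;
}
```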
- switch (*operation) { - case BinaryOperation::Add: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::BitAnd: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::BitOr: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::BitXor: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::FloorDiv: - // TODO: This is not a precise match for negative division. - // SignedDivIOp rounds towards zero and python rounds towards - // most negative. - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::LShift: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::Mod: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::Mult: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::RShift: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::Sub: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - default: - return failure(); - } - } else if (valueType.isa()) { - // Note that most operations are not supported on floating point values. - // In addition, some cannot be directly implemented with single std - // ops. - switch (*operation) { - case BinaryOperation::Add: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::Div: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::FloorDiv: - // TODO: Implement floating point floor division. - return rewriter.notifyMatchFailure( - op, "floating point floor division not implemented"); - case BinaryOperation::Mod: - // TODO: Implement floating point mod. 
- return rewriter.notifyMatchFailure( - op, "floating point mod not implemented"); - case BinaryOperation::Mult: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - case BinaryOperation::Sub: - rewriter.replaceOpWithNewOp(op, left, right); - return success(); - default: - return failure(); - } - } - - return failure(); - } -}; - -Optional -mapBasicpyPredicateToCmpI(Basicpy::CompareOperation predicate) { - using Basicpy::CompareOperation; - switch (predicate) { - case CompareOperation::Eq: - return CmpIPredicate::eq; - case CompareOperation::Gt: - return CmpIPredicate::sgt; - case CompareOperation::GtE: - return CmpIPredicate::sge; - case CompareOperation::Is: - return CmpIPredicate::eq; - case CompareOperation::IsNot: - return CmpIPredicate::ne; - case CompareOperation::Lt: - return CmpIPredicate::slt; - case CompareOperation::LtE: - return CmpIPredicate::sle; - case CompareOperation::NotEq: - return CmpIPredicate::ne; - default: - return llvm::None; - } -} - -Optional -mapBasicpyPredicateToCmpF(Basicpy::CompareOperation predicate) { - using Basicpy::CompareOperation; - switch (predicate) { - case CompareOperation::Eq: - return CmpFPredicate::OEQ; - case CompareOperation::Gt: - return CmpFPredicate::OGT; - case CompareOperation::GtE: - return CmpFPredicate::OGE; - case CompareOperation::Is: - return CmpFPredicate::OEQ; - case CompareOperation::IsNot: - return CmpFPredicate::ONE; - case CompareOperation::Lt: - return CmpFPredicate::OLT; - case CompareOperation::LtE: - return CmpFPredicate::OLE; - case CompareOperation::NotEq: - return CmpFPredicate::ONE; - default: - return llvm::None; - } -} - -class NumericCompare : public OpRewritePattern { -public: - using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(Basicpy::BinaryCompareOp op, - PatternRewriter &rewriter) const override { - auto loc = op.getLoc(); - auto valueType = op.left().getType(); - if (valueType != op.right().getType()) - return failure(); - if (!isLegalBinaryOpType(valueType)) - return failure(); - auto bpyPredicate = Basicpy::symbolizeCompareOperation(op.operation()); - if (!bpyPredicate) - return failure(); - - if (valueType.isa()) { - if (auto stdPredicate = mapBasicpyPredicateToCmpI(*bpyPredicate)) { - auto cmp = - rewriter.create(loc, *stdPredicate, op.left(), op.right()); - rewriter.replaceOpWithNewOp( - op, Basicpy::BoolType::get(rewriter.getContext()), cmp); - return success(); - } else { - return rewriter.notifyMatchFailure(op, "unsupported compare operation"); - } - } else if (valueType.isa()) { - if (auto stdPredicate = mapBasicpyPredicateToCmpF(*bpyPredicate)) { - auto cmp = - rewriter.create(loc, *stdPredicate, op.left(), op.right()); - rewriter.replaceOpWithNewOp( - op, Basicpy::BoolType::get(rewriter.getContext()), cmp); - return success(); - } else { - return rewriter.notifyMatchFailure(op, "unsupported compare operation"); - } - } - - return failure(); - } -}; - -// Converts the as_i1 op for numeric types. 
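A subtle design choice in the predicate tables above is that Python's identity comparisons (`is`, `is not`) are folded to plain value equality and inequality on these primitive types. A quick hypothetical spot-check (assuming visibility into the helpers above, which are file-local in the original source) makes that concrete.

```c++
#include <cassert>

// Hypothetical spot-check of the predicate mapping helpers defined above.
static void checkIdentityPredicateFolding() {
  using mlir::NPCOMP::Basicpy::CompareOperation;
  assert(*mapBasicpyPredicateToCmpI(CompareOperation::Is) ==
         mlir::CmpIPredicate::eq);
  assert(*mapBasicpyPredicateToCmpF(CompareOperation::IsNot) ==
         mlir::CmpFPredicate::ONE);
}
```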
-class NumericToI1 : public OpRewritePattern { -public: - using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(Basicpy::AsI1Op op, - PatternRewriter &rewriter) const override { - auto loc = op.getLoc(); - auto operandType = op.operand().getType(); - if (operandType.isa()) { - auto zero = rewriter.create(loc, 0, operandType); - rewriter.replaceOpWithNewOp(op, CmpIPredicate::ne, op.operand(), - zero); - return success(); - } else if (operandType.isa()) { - auto zero = rewriter.create(loc, operandType, - FloatAttr::get(operandType, 0.0)); - rewriter.replaceOpWithNewOp(op, CmpFPredicate::ONE, op.operand(), - zero); - return success(); - } - return failure(); - } -}; - -} // namespace - -void mlir::NPCOMP::populateBasicpyToStdPrimitiveOpPatterns( - RewritePatternSet &patterns) { - MLIRContext *context = patterns.getContext(); - patterns.add(context); - patterns.add(context); - patterns.add(context); -} diff --git a/lib/Conversion/CMakeLists.txt b/lib/Conversion/CMakeLists.txt index 83ed81724..67394d317 100644 --- a/lib/Conversion/CMakeLists.txt +++ b/lib/Conversion/CMakeLists.txt @@ -2,7 +2,6 @@ add_subdirectory(TorchToIREE) add_subdirectory(TorchToLinalg) add_subdirectory(TorchToSCF) add_subdirectory(TorchToStd) -add_subdirectory(BasicpyToStd) get_property(npcomp_conversion_libs GLOBAL PROPERTY NPCOMP_CONVERSION_LIBS) diff --git a/lib/Conversion/Passes.cpp b/lib/Conversion/Passes.cpp index 245416862..37ba5aae9 100644 --- a/lib/Conversion/Passes.cpp +++ b/lib/Conversion/Passes.cpp @@ -8,7 +8,6 @@ #include "npcomp/Conversion/Passes.h" -#include "npcomp/Conversion/BasicpyToStd/Passes.h" #include "npcomp/Conversion/TorchToIREE/TorchToIREE.h" #include "npcomp/Conversion/TorchToLinalg/TorchToLinalg.h" #include "npcomp/Conversion/TorchToSCF/TorchToSCF.h" diff --git a/lib/Dialect/Basicpy/CMakeLists.txt b/lib/Dialect/Basicpy/CMakeLists.txt deleted file mode 100644 index 9f57627c3..000000000 --- a/lib/Dialect/Basicpy/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(IR) -add_subdirectory(Transforms) diff --git a/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp b/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp deleted file mode 100644 index 227a9e22e..000000000 --- a/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp +++ /dev/null @@ -1,217 +0,0 @@ -//===- BasicpyDialect.cpp - Basic python dialect ----------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/IR/DialectImplementation.h" -#include "mlir/Transforms/InliningUtils.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h" -#include "llvm/ADT/TypeSwitch.h" - -using namespace mlir; -using namespace mlir::NPCOMP; -using namespace mlir::NPCOMP::Basicpy; - -#include "npcomp/Dialect/Basicpy/IR/BasicpyOpsDialect.cpp.inc" - -//===----------------------------------------------------------------------===// -// Dialect Interfaces -//===----------------------------------------------------------------------===// - -namespace { -struct BasicpyInlinerInterface : public DialectInlinerInterface { - using DialectInlinerInterface::DialectInlinerInterface; - bool isLegalToInline(Region *dest, Region *src, bool wouldBeCloned, - BlockAndValueMapping &valueMapping) const final { - return true; - } - bool isLegalToInline(Operation *, Region *, bool wouldBeCloned, - BlockAndValueMapping &) const final { - return true; - } -}; -} // end anonymous namespace - -//===----------------------------------------------------------------------===// -// Dialect Class -//===----------------------------------------------------------------------===// - -void BasicpyDialect::initialize() { - addOperations< -#define GET_OP_LIST -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.cpp.inc" - >(); - addTypes(); - addInterfaces(); - getContext()->getOrLoadDialect(); - - // TODO: Make real ops for everything we need. - allowUnknownOperations(); -} - -Operation *BasicpyDialect::materializeConstant(OpBuilder &builder, - Attribute value, Type type, - Location loc) { - // std.constant is used for literal i1 types (not !basicpy.BoolType). - if (auto integerType = type.dyn_cast()) { - if (integerType.getWidth() == 1) - return builder.create(loc, value); - } - // NumericConstantOp. - // Supports IntegerType (any signedness), FloatType and ComplexType. - if (type.isa() || type.isa() || - type.isa()) - return builder.create(loc, type, value); - - // Bool (i1 -> !basicpy.BoolType). - if (type.isa()) { - auto i1Value = value.dyn_cast(); - if (i1Value && i1Value.getType().getIntOrFloatBitWidth() == 1) - return builder.create(loc, type, i1Value); - } - - // Bytes. - if (type.isa()) { - if (auto strValue = value.dyn_cast()) - return builder.create(loc, type, strValue); - } - - // Str. 
- if (type.isa()) { - if (auto strValue = value.dyn_cast()) - return builder.create(loc, type, strValue); - } - - if (auto typeAttr = value.dyn_cast()) - return builder.create(loc, typeAttr.getValue()); - - return nullptr; -} - -Type BasicpyDialect::parseType(DialectAsmParser &parser) const { - StringRef keyword; - if (parser.parseKeyword(&keyword)) - return Type(); - - if (keyword == "BoolType") - return BoolType::get(getContext()); - if (keyword == "BytesType") - return BytesType::get(getContext()); - if (keyword == "DictType") - return DictType::get(getContext()); - if (keyword == "EllipsisType") - return EllipsisType::get(getContext()); - if (keyword == "ListType") - return ListType::get(getContext()); - if (keyword == "NoneType") - return NoneType::get(getContext()); - if (keyword == "SlotObject") { - StringRef className; - if (parser.parseLess() || parser.parseKeyword(&className)) { - return Type(); - } - - llvm::SmallVector slotTypes; - while (succeeded(parser.parseOptionalComma())) { - Type slotType; - if (parser.parseType(slotType)) - return Type(); - slotTypes.push_back(slotType); - } - if (parser.parseGreater()) - return Type(); - return SlotObjectType::get(StringAttr::get(getContext(), className), - slotTypes); - } - if (keyword == "StrType") - return StrType::get(getContext()); - if (keyword == "TupleType") - return TupleType::get(getContext()); - if (keyword == "UnknownType") - return UnknownType::get(getContext()); - - parser.emitError(parser.getNameLoc(), "unknown basicpy type"); - return Type(); -} - -void BasicpyDialect::printType(Type type, DialectAsmPrinter &os) const { - TypeSwitch(type) - .Case([&](Type) { os << "BoolType"; }) - .Case([&](Type) { os << "BytesType"; }) - .Case([&](Type) { os << "DictType"; }) - .Case([&](Type) { os << "EllipsisType"; }) - .Case([&](Type) { os << "ListType"; }) - .Case([&](Type) { os << "NoneType"; }) - .Case([&](SlotObjectType slotObject) { - auto slotTypes = slotObject.getSlotTypes(); - os << "SlotObject<" << slotObject.getClassName().getValue(); - if (!slotTypes.empty()) { - os << ", "; - llvm::interleaveComma(slotTypes, os, - [&](Type t) { os.printType(t); }); - } - os << ">"; - }) - .Case([&](Type) { os << "StrType"; }) - .Case([&](Type) { os << "TupleType"; }) - .Case([&](Type) { os << "UnknownType"; }) - .Default( - [&](Type) { llvm_unreachable("unexpected 'basicpy' type kind"); }); -} - -//----------------------------------------------------------------------------// -// Type and attribute detail -//----------------------------------------------------------------------------// -namespace mlir { -namespace NPCOMP { -namespace Basicpy { -namespace detail { - -struct SlotObjectTypeStorage : public TypeStorage { - using KeyTy = std::pair>; - SlotObjectTypeStorage(StringAttr className, ArrayRef slotTypes) - : className(className), slotTypes(slotTypes) {} - bool operator==(const KeyTy &other) const { - return className == other.first && slotTypes == other.second; - } - static llvm::hash_code hashKey(const KeyTy &key) { - return llvm::hash_combine(key.first, key.second); - } - static SlotObjectTypeStorage *construct(TypeStorageAllocator &allocator, - const KeyTy &key) { - ArrayRef slotTypes = allocator.copyInto(key.second); - return new (allocator.allocate()) - SlotObjectTypeStorage(key.first, slotTypes); - } - - StringAttr className; - ArrayRef slotTypes; -}; -} // namespace detail -} // namespace Basicpy -} // namespace NPCOMP -} // namespace mlir - -StringAttr SlotObjectType::getClassName() { return getImpl()->className; } -ArrayRef 
SlotObjectType::getSlotTypes() { return getImpl()->slotTypes; } -unsigned SlotObjectType::getSlotCount() { return getImpl()->slotTypes.size(); } - -SlotObjectType SlotObjectType::get(StringAttr className, - ArrayRef slotTypes) { - return Base::get(className.getContext(), className, slotTypes); -} - -//----------------------------------------------------------------------------// -// CPA Interface Implementations -//----------------------------------------------------------------------------// - -Typing::CPA::TypeNode * -UnknownType::mapToCPAType(Typing::CPA::Context &context) { - return context.newTypeVar(); -} diff --git a/lib/Dialect/Basicpy/IR/BasicpyOps.cpp b/lib/Dialect/Basicpy/IR/BasicpyOps.cpp deleted file mode 100644 index 1417e7c0d..000000000 --- a/lib/Dialect/Basicpy/IR/BasicpyOps.cpp +++ /dev/null @@ -1,416 +0,0 @@ -//===- BasicpyOps.cpp - Core numpy dialect ops -------------------*- C++-*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h" -#include "mlir/IR/Builders.h" -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/FunctionImplementation.h" -#include "mlir/IR/OpImplementation.h" -#include "mlir/IR/PatternMatch.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyOpsEnums.cpp.inc" - -using namespace mlir; -using namespace mlir::NPCOMP::Basicpy; - -// Fallback verifier for ops that don't have a dedicated one. -template static LogicalResult verify(T op) { return success(); } - -//===----------------------------------------------------------------------===// -// BoolCastOp -//===----------------------------------------------------------------------===// - -OpFoldResult BoolCastOp::fold(ArrayRef operands) { - return operands[0]; -} - -//===----------------------------------------------------------------------===// -// BoolConstantOp -//===----------------------------------------------------------------------===// - -OpFoldResult BoolConstantOp::fold(ArrayRef operands) { - return valueAttr(); -} - -void BoolConstantOp::getAsmResultNames( - function_ref setNameFn) { - if (value()) - setNameFn(getResult(), "bool_true"); - else - setNameFn(getResult(), "bool_false"); -} - -//===----------------------------------------------------------------------===// -// BytesConstantOp -//===----------------------------------------------------------------------===// - -OpFoldResult BytesConstantOp::fold(ArrayRef operands) { - return valueAttr(); -} - -void BytesConstantOp::getAsmResultNames( - function_ref setNameFn) { - setNameFn(getResult(), "bytes"); -} - -//===----------------------------------------------------------------------===// -// NumericConstantOp -//===----------------------------------------------------------------------===// - -static ParseResult parseNumericConstantOp(OpAsmParser &parser, - OperationState *result) { - Attribute valueAttr; - if (parser.parseOptionalAttrDict(result->attributes) || - parser.parseAttribute(valueAttr, "value", result->attributes)) - return failure(); - - // If not an Integer or Float attr (which carry the type in the attr), - // expect a trailing type. 
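For orientation, the slot-object accessors above pair with the custom type syntax handled by the Basicpy type parser and printer earlier in this file. A small construction sketch follows; the `ctx` pointer and the `"slice"` class name are illustrative assumptions, not taken from the patch.

```cpp
// Illustration only: build a type whose printed form is roughly
//   !basicpy.SlotObject<slice, !basicpy.NoneType, !basicpy.NoneType>
// Assumes: mlir::MLIRContext *ctx; "slice" is an arbitrary example class name.
mlir::Type noneTy = mlir::NPCOMP::Basicpy::NoneType::get(ctx);
mlir::Type slots[] = {noneTy, noneTy};
auto slotObjectTy = mlir::NPCOMP::Basicpy::SlotObjectType::get(
    mlir::StringAttr::get(ctx, "slice"), slots);
// getClassName()/getSlotTypes()/getSlotCount() recover the components above.
(void)slotObjectTy;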
- Type type; - if (valueAttr.isa() || valueAttr.isa()) - type = valueAttr.getType(); - else if (parser.parseColonType(type)) - return failure(); - return parser.addTypeToList(type, result->types); -} - -static void print(OpAsmPrinter &p, NumericConstantOp op) { - p << " "; - p.printOptionalAttrDict(op->getAttrs(), /*elidedAttrs=*/{"value"}); - - if (op->getAttrs().size() > 1) - p << ' '; - p << op.value(); - - // If not an Integer or Float attr, expect a trailing type. - if (!op.value().isa() && !op.value().isa()) - p << " : " << op.getType(); -} - -static LogicalResult verify(NumericConstantOp &op) { - auto value = op.value(); - if (!value) - return op.emitOpError("requires a 'value' attribute"); - auto type = op.getType(); - - if (type.isa()) { - if (!value.isa()) - return op.emitOpError("requires 'value' to be a floating point constant"); - return success(); - } - - if (auto intType = type.dyn_cast()) { - if (!value.isa()) - return op.emitOpError("requires 'value' to be an integer constant"); - if (intType.getWidth() == 1) - return op.emitOpError("cannot have an i1 type"); - return success(); - } - - if (type.isa()) { - if (auto complexComps = value.dyn_cast()) { - if (complexComps.size() == 2) { - auto realValue = complexComps[0].dyn_cast(); - auto imagValue = complexComps[1].dyn_cast(); - if (realValue && imagValue && - realValue.getType() == imagValue.getType()) - return success(); - } - } - return op.emitOpError("requires 'value' to be a two element array of " - "floating point complex number components"); - } - - return op.emitOpError("unsupported basicpy.numeric_constant type"); -} - -OpFoldResult NumericConstantOp::fold(ArrayRef operands) { - assert(operands.empty() && "numeric_constant has no operands"); - return value(); -} - -void NumericConstantOp::getAsmResultNames( - function_ref setNameFn) { - Type type = getType(); - if (auto intCst = value().dyn_cast()) { - IntegerType intTy = type.dyn_cast(); - APInt intValue = intCst.getValue(); - - // Otherwise, build a complex name with the value and type. 
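As a cross-reference for the verifier above: integer constants must carry a non-i1 integer type, and complex constants carry a two-element array of matching float attributes. The builder-side sketch below is hedged; it assumes an existing `OpBuilder b` and `Location loc`, and that the generated builder takes a result type plus a value attribute (as the dialect's `materializeConstant` suggests).

```cpp
// Sketch only. Assumes: mlir::OpBuilder b; mlir::Location loc.
mlir::MLIRContext *ctx = b.getContext();

// A signed 64-bit constant (never i1, per the verifier).
auto si64 = mlir::IntegerType::get(ctx, 64, mlir::IntegerType::Signed);
mlir::Value intCst = b.create<mlir::NPCOMP::Basicpy::NumericConstantOp>(
    loc, si64, b.getIntegerAttr(si64, 42));

// A complex constant: two float attrs of the same element type.
auto f64 = b.getF64Type();
auto complexTy = mlir::ComplexType::get(f64);
auto complexVal = b.getArrayAttr({b.getFloatAttr(f64, 1.0),
                                  b.getFloatAttr(f64, -2.5)});
mlir::Value complexCst = b.create<mlir::NPCOMP::Basicpy::NumericConstantOp>(
    loc, complexTy, complexVal);
```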
- SmallString<32> specialNameBuffer; - llvm::raw_svector_ostream specialName(specialNameBuffer); - specialName << "num"; - if (intTy.isSigned()) - specialName << intValue.getSExtValue(); - else - specialName << intValue.getZExtValue(); - if (intTy) - specialName << '_' << type; - setNameFn(getResult(), specialName.str()); - } else { - setNameFn(getResult(), "num"); - } -} - -//===----------------------------------------------------------------------===// -// ExecOp -//===----------------------------------------------------------------------===// - -void ExecOp::build(OpBuilder &builder, OperationState &result) { - OpBuilder::InsertionGuard guard(builder); - Region *body = result.addRegion(); - builder.createBlock(body); -} - -static ParseResult parseExecOp(OpAsmParser &parser, OperationState *result) { - Region *bodyRegion = result->addRegion(); - if (parser.parseOptionalAttrDictWithKeyword(result->attributes) || - parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{})) - return failure(); - return success(); -} - -static void print(OpAsmPrinter &p, ExecOp op) { - p.printOptionalAttrDictWithKeyword(op->getAttrs()); - p.printRegion(op.body()); -} - -//===----------------------------------------------------------------------===// -// FuncTemplateCallOp -//===----------------------------------------------------------------------===// - -static LogicalResult verify(FuncTemplateCallOp op) { - auto argNames = op.arg_names(); - if (argNames.size() > op.args().size()) { - return op.emitOpError() << "expected <= kw arg names vs args"; - } - - for (auto it : llvm::enumerate(argNames)) { - auto argName = it.value().cast().getValue(); - if (argName == "*" && it.index() != 0) { - return op.emitOpError() << "positional arg pack must be the first kw arg"; - } - if (argName == "**" && it.index() != argNames.size() - 1) { - return op.emitOpError() << "kw arg pack must be the last kw arg"; - } - } - return success(); -} - -//===----------------------------------------------------------------------===// -// FuncTemplateOp -//===----------------------------------------------------------------------===// - -void FuncTemplateOp::build(OpBuilder &builder, OperationState &result) { - OpBuilder::InsertionGuard guard(builder); - ensureTerminator(*result.addRegion(), builder, result.location); -} - -static ParseResult parseFuncTemplateOp(OpAsmParser &parser, - OperationState *result) { - Region *bodyRegion = result->addRegion(); - StringAttr symbolName; - - if (parser.parseSymbolName(symbolName, SymbolTable::getSymbolAttrName(), - result->attributes) || - parser.parseOptionalAttrDictWithKeyword(result->attributes) || - parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{})) - return failure(); - - FuncTemplateOp::ensureTerminator(*bodyRegion, parser.getBuilder(), - result->location); - - return success(); -} - -static void print(OpAsmPrinter &p, FuncTemplateOp op) { - p << " "; - p.printSymbolName(op.getName()); - p.printOptionalAttrDictWithKeyword(op->getAttrs(), - {SymbolTable::getSymbolAttrName()}); - p.printRegion(op.body()); -} - -static LogicalResult verify(FuncTemplateOp op) { - Block *body = op.getBody(); - for (auto &childOp : body->getOperations()) { - if (!llvm::isa(childOp) && - !llvm::isa(childOp)) { - return childOp.emitOpError() << "illegal operation in func_template"; - } - } - return success(); -} - -//===----------------------------------------------------------------------===// -// SlotObjectMakeOp 
-//===----------------------------------------------------------------------===// - -static ParseResult parseSlotObjectMakeOp(OpAsmParser &parser, - OperationState *result) { - llvm::SmallVector operandTypes; - if (parser.parseOperandList(operandTypes, OpAsmParser::Delimiter::Paren) || - parser.parseOptionalAttrDict(result->attributes) || - parser.parseArrowTypeList(result->types)) { - return failure(); - } - - if (result->types.size() != 1 || - !result->types.front().isa()) { - return parser.emitError(parser.getNameLoc(), - "custom assembly form requires SlotObject result"); - } - auto slotObjectType = result->types.front().cast(); - result->addAttribute("className", slotObjectType.getClassName()); - return parser.resolveOperands(operandTypes, slotObjectType.getSlotTypes(), - parser.getNameLoc(), result->operands); -} - -static void print(OpAsmPrinter &p, SlotObjectMakeOp op) { - // If the argument types do not match the result type slots, then - // print the generic form. - auto canCustomPrint = ([&]() -> bool { - auto type = op.result().getType().dyn_cast(); - if (!type) - return false; - auto args = op.slots(); - auto slotTypes = type.getSlotTypes(); - if (args.size() != slotTypes.size()) - return false; - for (unsigned i = 0, e = args.size(); i < e; ++i) { - if (args[i].getType() != slotTypes[i]) - return false; - } - return true; - })(); - if (!canCustomPrint) { - p.printGenericOp(op); - return; - } - - p << "("; - p.printOperands(op.slots()); - p << ")"; - p.printOptionalAttrDict(op->getAttrs(), {"className"}); - - // Not really a symbol but satisfies same rules. - p.printArrowTypeList(op.getOperation()->getResultTypes()); -} - -//===----------------------------------------------------------------------===// -// SlotObjectGetOp -//===----------------------------------------------------------------------===// - -static ParseResult parseSlotObjectGetOp(OpAsmParser &parser, - OperationState *result) { - OpAsmParser::OperandType object; - IntegerAttr indexAttr; - Type indexType = parser.getBuilder().getIndexType(); - if (parser.parseOperand(object) || parser.parseLSquare() || - parser.parseAttribute(indexAttr, indexType, "index", - result->attributes) || - parser.parseRSquare()) { - return failure(); - } - Type objectType; - if (parser.parseColonType(objectType) || - parser.resolveOperand(object, objectType, result->operands)) { - return failure(); - } - - auto castObjectType = objectType.dyn_cast(); - if (!castObjectType) { - return parser.emitError(parser.getNameLoc(), - "illegal object type on custom assembly form"); - } - auto index = indexAttr.getValue().getZExtValue(); - auto slotTypes = castObjectType.getSlotTypes(); - if (index >= slotTypes.size()) { - return parser.emitError(parser.getNameLoc(), - "out of bound index on custom assembly form"); - } - result->addTypes({slotTypes[index]}); - return success(); -} - -static void print(OpAsmPrinter &p, SlotObjectGetOp op) { - // If the argument types do not match the result type slots, then - // print the generic form. 
- auto canCustomPrint = ([&]() -> bool { - auto type = op.object().getType().dyn_cast(); - if (!type) - return false; - auto index = op.index().getZExtValue(); - if (index >= type.getSlotCount()) - return false; - if (op.result().getType() != type.getSlotTypes()[index]) - return false; - return true; - })(); - if (!canCustomPrint) { - p.printGenericOp(op); - return; - } - - p << " "; - p.printOperand(op.object()); - p << "[" << op.index() << "]"; - p.printOptionalAttrDict(op->getAttrs(), {"index"}); - p << " : "; - p.printType(op.object().getType()); -} - -//===----------------------------------------------------------------------===// -// SingletonOp -//===----------------------------------------------------------------------===// - -OpFoldResult SingletonOp::fold(ArrayRef operands) { - auto resultType = getResult().getType(); - return TypeAttr::get(resultType); -} - -//===----------------------------------------------------------------------===// -// StrConstantOp -//===----------------------------------------------------------------------===// - -OpFoldResult StrConstantOp::fold(ArrayRef operands) { - return valueAttr(); -} - -void StrConstantOp::getAsmResultNames( - function_ref setNameFn) { - setNameFn(getResult(), "str"); -} - -//===----------------------------------------------------------------------===// -// UnknownCastOp -//===----------------------------------------------------------------------===// - -namespace { - -class ElideIdentityUnknownCast : public OpRewritePattern { -public: - using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(UnknownCastOp op, - PatternRewriter &rewriter) const override { - if (op.operand().getType() != op.result().getType()) - return failure(); - rewriter.replaceOp(op, op.operand()); - return success(); - } -}; - -} // namespace - -void UnknownCastOp::getCanonicalizationPatterns(RewritePatternSet &patterns, - MLIRContext *context) { - patterns.add(context); -} - -#define GET_OP_CLASSES -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.cpp.inc" diff --git a/lib/Dialect/Basicpy/IR/CMakeLists.txt b/lib/Dialect/Basicpy/IR/CMakeLists.txt deleted file mode 100644 index 7e4d19571..000000000 --- a/lib/Dialect/Basicpy/IR/CMakeLists.txt +++ /dev/null @@ -1,15 +0,0 @@ -add_npcomp_dialect_library(NPCOMPBasicpyDialect - BasicpyDialect.cpp - BasicpyOps.cpp - - ADDITIONAL_HEADER_DIRS - ${PROJECT_SOURCE_DIR}/include/npcomp/Dialect/Basicpy - - DEPENDS - MLIRBasicpyOpsIncGen - - LINK_LIBS PUBLIC - NPCOMPTypingCPA - MLIRIR - MLIRStandard -) diff --git a/lib/Dialect/Basicpy/Transforms/CMakeLists.txt b/lib/Dialect/Basicpy/Transforms/CMakeLists.txt deleted file mode 100644 index 323f56e93..000000000 --- a/lib/Dialect/Basicpy/Transforms/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -add_npcomp_conversion_library(NPCOMPBasicpyPasses - Passes.cpp - TypeInference.cpp - - ADDITIONAL_HEADER_DIRS - ${PROJECT_SOURCE_DIR}/include/npcomp/Dialect/Basicpy/Transforms - - DEPENDS - NPCOMPBasicpyPassIncGen - - LINK_COMPONENTS - Core - - LINK_LIBS PUBLIC - MLIRIR - MLIRPass - NPCOMPTypingCPA -) diff --git a/lib/Dialect/Basicpy/Transforms/PassDetail.h b/lib/Dialect/Basicpy/Transforms/PassDetail.h deleted file mode 100644 index fbd10b8ac..000000000 --- a/lib/Dialect/Basicpy/Transforms/PassDetail.h +++ /dev/null @@ -1,25 +0,0 @@ -//===- PassDetail.h - Pass details ------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_BASICPY_TRANSFORMS_PASSDETAIL_H -#define NPCOMP_DIALECT_BASICPY_TRANSFORMS_PASSDETAIL_H - -#include "mlir/Pass/Pass.h" - -namespace mlir { -namespace NPCOMP { -namespace Basicpy { - -#define GEN_PASS_CLASSES -#include "npcomp/Dialect/Basicpy/Transforms/Passes.h.inc" - -} // namespace Basicpy -} // namespace NPCOMP -} // end namespace mlir - -#endif // NPCOMP_DIALECT_BASICPY_TRANSFORMS_PASSDETAIL_H diff --git a/lib/Dialect/Basicpy/Transforms/Passes.cpp b/lib/Dialect/Basicpy/Transforms/Passes.cpp deleted file mode 100644 index 7b8a06295..000000000 --- a/lib/Dialect/Basicpy/Transforms/Passes.cpp +++ /dev/null @@ -1,20 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Dialect/Basicpy/Transforms/Passes.h" - -//===----------------------------------------------------------------------===// -// Pass registration -//===----------------------------------------------------------------------===// - -namespace { -#define GEN_PASS_REGISTRATION -#include "npcomp/Dialect/Basicpy/Transforms/Passes.h.inc" -} // end namespace - -void mlir::NPCOMP::registerBasicpyPasses() { ::registerPasses(); } diff --git a/lib/Dialect/Basicpy/Transforms/TypeInference.cpp b/lib/Dialect/Basicpy/Transforms/TypeInference.cpp deleted file mode 100644 index 900f336a4..000000000 --- a/lib/Dialect/Basicpy/Transforms/TypeInference.cpp +++ /dev/null @@ -1,490 +0,0 @@ -//===- TypeInference.cpp - Type inference passes -----------------*- C++-*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "PassDetail.h" - -#include "mlir/Dialect/SCF/SCF.h" -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/Interfaces/ControlFlowInterfaces.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h" -#include "npcomp/Dialect/Basicpy/Transforms/Passes.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/ilist.h" -#include "llvm/Support/Allocator.h" -#include "llvm/Support/Debug.h" - -#define DEBUG_TYPE "basicpy-type-inference" - -using namespace llvm; -using namespace mlir; -using namespace mlir::NPCOMP::Basicpy; - -namespace { - -/// Value type wrapping a type node. 
-class TypeNode : public ilist_node { -public: - enum class Discrim { - CONST_TYPE, - VAR_ORDINAL, - }; - - TypeNode(Value def, Type constType) - : def(def), select(constType), discrim(Discrim::CONST_TYPE) {} - TypeNode(Value def, unsigned varOrdinal) - : def(def), select(varOrdinal), discrim(Discrim::VAR_ORDINAL) {} - - bool operator==(const TypeNode &other) const { - if (discrim != other.discrim) - return false; - switch (discrim) { - case Discrim::CONST_TYPE: - return select.constType == other.select.constType; - case Discrim::VAR_ORDINAL: - return select.varOrdinal == other.select.varOrdinal; - } - return false; - } - - Value getDef() const { return def; } - Discrim getDiscrim() const { return discrim; } - - Type getConstType() const { - assert(discrim == Discrim::CONST_TYPE); - return select.constType; - } - - unsigned getVarOrdinal() const { - assert(discrim == Discrim::VAR_ORDINAL); - return select.varOrdinal; - } - -private: - Value def; - union Select { - Select(Type constType) : constType(constType) {} - Select(unsigned varOrdinal) : varOrdinal(varOrdinal) {} - Type constType; - unsigned varOrdinal; - } select; - Discrim discrim; -}; - -/// A type equation, representing expected equality of types. -/// If the equation is derived from an operation, it is preserved for debugging -/// and messaging. -class TypeEquation : public ilist_node { -public: - TypeEquation(TypeNode *left, TypeNode *right, Operation *context) - : left(left), right(right), context(context) {} - - TypeNode *getLeft() const { return left; } - TypeNode *getRight() const { return right; } - Operation *getContext() const { return context; } - -private: - TypeNode *left; - TypeNode *right; - Operation *context; -}; - -raw_ostream &operator<<(raw_ostream &os, const TypeNode &tn) { - switch (tn.getDiscrim()) { - case TypeNode::Discrim::CONST_TYPE: - os << "CONST(" << tn.getConstType() << ")"; - break; - case TypeNode::Discrim::VAR_ORDINAL: - os << "VAR(" << tn.getVarOrdinal() << ")"; - break; - } - return os; -} - -raw_ostream &operator<<(raw_ostream &os, const TypeEquation &eq) { - os << "[TypeEq left=<" << *eq.getLeft() << ">, right=<" << *eq.getRight() - << ">: " << *eq.getContext() << "]"; - return os; -} - -/// Container for constructing type equations in an HM-like type inference -/// setup. -/// -/// As a first pass, every eligible Value (def) is assigned either a Type or -/// a TypeVar (placeholder). -class TypeEquations { -public: - TypeEquations() = default; - - // Gets a type node for the given def, creating it if necessary. - TypeNode *getTypeNode(Value def) { - TypeNode *&typeNode = defToNodeMap[def]; - if (typeNode) - return typeNode; - - if (def.getType().isa()) { - // Type variable. - typeNode = createTypeVar(def); - } else { - // Constant type. - typeNode = createConstType(def, def.getType()); - } - return typeNode; - } - - template TypeEquation *addEquation(Args &&... args) { - TypeEquation *eq = allocator.Allocate(1); - new (eq) TypeEquation(std::forward(args)...); - equations.push_back(*eq); - return eq; - } - - /// Adds an equality equation for two defs, creating type nodes if necessary. - void addTypeEqualityEquation(Value def1, Value def2, Operation *context) { - addEquation(getTypeNode(def1), getTypeNode(def2), context); - } - - /// Print a report of the equations for debugging. 
- void report(raw_ostream &os) { - os << "Type variable map:\n"; - for (auto it : llvm::enumerate(ordinalToVarNode)) { - os << ": " << it.index() << " = " << it.value()->getDef() << "\n"; - } - os << "Type equations:\n"; - os << "---------------\n"; - for (auto &eq : equations) { - os << " : " << eq << "\n"; - } - } - - simple_ilist &getEquations() { return equations; } - - TypeNode *lookupVarOrdinal(unsigned ordinal) { - assert(ordinal < ordinalToVarNode.size()); - return ordinalToVarNode[ordinal]; - } - -private: - TypeNode *createConstType(Value def, Type constType) { - TypeNode *n = allocator.Allocate(1); - new (n) TypeNode(def, constType); - nodes.push_back(*n); - return n; - } - - TypeNode *createTypeVar(Value def) { - TypeNode *n = allocator.Allocate(1); - new (n) TypeNode(def, nextOrdinal++); - nodes.push_back(*n); - ordinalToVarNode.push_back(n); - assert(ordinalToVarNode.size() == nextOrdinal); - return n; - } - - BumpPtrAllocator allocator; - simple_ilist nodes; - simple_ilist equations; - llvm::DenseMap defToNodeMap; - llvm::SmallVector ordinalToVarNode; - unsigned nextOrdinal = 0; -}; - -/// (Very) simple type unification. This really isn't advanced enough for -/// anything beyond simple, unambiguous programs. -/// It is also terribly inefficient. -class TypeUnifier { -public: - using SubstMap = llvm::DenseMap; - - Optional unifyEquations(TypeEquations &equations) { - Optional subst; - subst.emplace(); - - for (auto &eq : equations.getEquations()) { - subst = unify(eq.getLeft(), eq.getRight(), std::move(subst)); - if (!subst) { - break; - } - } - - return subst; - } - - Type resolveSubst(TypeNode *typeNode, const Optional &subst) { - if (!subst) - return nullptr; - if (typeNode->getDiscrim() == TypeNode::Discrim::CONST_TYPE) { - return typeNode->getConstType(); - } - if (typeNode->getDiscrim() == TypeNode::Discrim::VAR_ORDINAL) { - auto foundIt = subst->find(typeNode->getVarOrdinal()); - if (foundIt != subst->end()) { - return resolveSubst(foundIt->second, subst); - } else { - return nullptr; - } - } - return nullptr; - } - - Optional unify(TypeNode *typeX, TypeNode *typeY, - Optional subst) { - LLVM_DEBUG(llvm::dbgs() << "+ UNIFY: " << *typeX << ", " << *typeY << "\n"); - if (!subst) { - emitError(typeX->getDef().getLoc()) << "cannot unify type"; - emitRemark(typeY->getDef().getLoc()) << "conflicting expression here"; - return None; - } else if (*typeX == *typeY) { - return subst; - } else if (typeX->getDiscrim() == TypeNode::Discrim::VAR_ORDINAL) { - return unifyVariable(typeX, typeY, std::move(*subst)); - } else if (typeY->getDiscrim() == TypeNode::Discrim::VAR_ORDINAL) { - return unifyVariable(typeY, typeX, std::move(*subst)); - } else { - LLVM_DEBUG(llvm::dbgs() << " Unify fallthrough\n"); - return None; - } - } - - Optional unifyVariable(TypeNode *varNode, TypeNode *typeNode, - SubstMap subst) { - assert(varNode->getDiscrim() == TypeNode::Discrim::VAR_ORDINAL); - LLVM_DEBUG(llvm::dbgs() << " - UNIFY VARIABLE: " << *varNode << " <- " - << *typeNode << "\n"); - // Var node in subst? - auto it = subst.find(varNode->getVarOrdinal()); - if (it != subst.end()) { - TypeNode *found = it->second; - LLVM_DEBUG(llvm::dbgs() << " --> FOUND VAR: " << *found << "\n"); - return unify(found, typeNode, std::move(subst)); - } - - // Type node in subst? 
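A compressed usage sketch of the two classes above, for readers skimming the deletion: conceptually, given equations VAR(0) == CONST(i64) and VAR(1) == VAR(0), unification produces a substitution under which both variables resolve to i64. The snippet assumes a populated `TypeEquations equations` and a `Value someValue` in scope, plus the `using namespace` declarations at the top of this file.

```cpp
// Sketch; assumes: TypeEquations equations (already populated), Value someValue.
TypeUnifier unifier;
llvm::Optional<TypeUnifier::SubstMap> subst = unifier.unifyEquations(equations);
if (!subst) {
  // Conflicting equations: bail out (the pass further below reports an error).
} else {
  // Resolve a value's (possibly variable) type node to a concrete Type.
  Type resolved = unifier.resolveSubst(equations.getTypeNode(someValue), subst);
  (void)resolved; // e.g. feed into Value::setType(), as the pass below does.
}
```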
- if (typeNode->getDiscrim() == TypeNode::Discrim::VAR_ORDINAL) { - it = subst.find(typeNode->getVarOrdinal()); - if (it != subst.end()) { - TypeNode *found = it->second; - LLVM_DEBUG(llvm::dbgs() << " --> FOUND TYPE: " << *found << "\n"); - return unify(varNode, found, std::move(subst)); - } - } - - // Does the variable appear in the type? - if (occursCheck(varNode, typeNode, subst)) { - LLVM_DEBUG(llvm::dbgs() << "FAILED OCCURS_CHECK\n"); - return None; - } - - // varNode is not yet in subst and cannot simplify typeNode. Extend. - subst[varNode->getVarOrdinal()] = typeNode; - return std::move(subst); - } - - bool occursCheck(TypeNode *varNode, TypeNode *typeNode, SubstMap &subst) { - if (*varNode == *typeNode) - return true; - - if (typeNode->getDiscrim() == TypeNode::Discrim::VAR_ORDINAL) { - unsigned typeOrdinal = typeNode->getVarOrdinal(); - auto foundIt = subst.find(typeOrdinal); - if (foundIt != subst.end()) { - return occursCheck(varNode, foundIt->second, subst); - } - } - - return false; - } -}; - -class TypeEquationPopulator { -public: - TypeEquationPopulator(TypeEquations &equations) : equations(equations) {} - - /// If a return op was visited, this will be one of them. - Operation *getLastReturnOp() { return funcReturnOp; } - - /// Gets any ReturnLike ops that do not return from the outer function. - /// This is used to fixup parent SCF ops and the like. - llvm::SmallVectorImpl &getInnerReturnLikeOps() { - return innerReturnLikeOps; - } - - LogicalResult runOnFunction(FuncOp funcOp) { - // Iterate and create type nodes for entry block arguments, as these - // must be resolved no matter what. - if (funcOp.getBody().empty()) - return success(); - - auto &entryBlock = funcOp.getBody().front(); - for (auto blockArg : entryBlock.getArguments()) { - equations.getTypeNode(blockArg); - } - - // Then walk ops, creating equations. - LLVM_DEBUG(llvm::dbgs() << "POPULATE CHILD OPS:\n"); - auto result = funcOp.walk([&](Operation *childOp) -> WalkResult { - if (childOp == funcOp) - return WalkResult::advance(); - LLVM_DEBUG(llvm::dbgs() << " + POPULATE: " << *childOp << "\n"); - // Special op handling. - // Many of these (that are not standard ops) should become op - // interfaces. - // -------------------- - if (auto op = dyn_cast(childOp)) { - // Note that the condition is always i1 and not subject to type - // inference. - equations.addTypeEqualityEquation(op.true_value(), op.false_value(), - op); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // Note that the result is always i1 and not subject to type - // inference. - equations.getTypeNode(op.operand()); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // Note that the condition is always i1 and not subject to type - // inference. 
- for (auto result : op.getResults()) { - equations.getTypeNode(result); - } - return WalkResult::advance(); - } - if (auto yieldOp = dyn_cast(childOp)) { - auto scfParentOp = yieldOp->getParentOp(); - if (scfParentOp->getNumResults() != yieldOp.getNumOperands()) { - yieldOp.emitWarning() - << "cannot run type inference on yield due to arity mismatch"; - return WalkResult::advance(); - } - for (auto it : - llvm::zip(scfParentOp->getResults(), yieldOp.getOperands())) { - equations.addTypeEqualityEquation(std::get<1>(it), std::get<0>(it), - yieldOp); - } - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - equations.addTypeEqualityEquation(op.operand(), op.result(), op); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // TODO: This should really be applying arithmetic promotion, not - // strict equality. - equations.addTypeEqualityEquation(op.left(), op.right(), op); - equations.addTypeEqualityEquation(op.left(), op.result(), op); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // TODO: This should really be applying arithmetic promotion, not - // strict equality. - equations.addTypeEqualityEquation(op.left(), op.right(), op); - return WalkResult::advance(); - } - - // Fallback trait based equations. - // ---------------------- - // Ensure that constant nodes get assigned a constant type. - if (childOp->hasTrait()) { - equations.getTypeNode(childOp->getResult(0)); - return WalkResult::advance(); - } - // Function returns must all have the same types. - if (childOp->hasTrait()) { - if (childOp->getParentOp() == funcOp) { - if (funcReturnOp) { - if (funcReturnOp->getNumOperands() != childOp->getNumOperands()) { - childOp->emitOpError() << "different arity of function returns"; - return WalkResult::interrupt(); - } - for (auto it : llvm::zip(funcReturnOp->getOperands(), - childOp->getOperands())) { - equations.addTypeEqualityEquation(std::get<0>(it), - std::get<1>(it), childOp); - } - } - funcReturnOp = childOp; - return WalkResult::advance(); - } else { - innerReturnLikeOps.push_back(childOp); - } - } - - childOp->emitRemark() << "unhandled op in type inference"; - - return WalkResult::advance(); - }); - - return success(result.wasInterrupted()); - } - -private: - // The last encountered ReturnLike op. - Operation *funcReturnOp = nullptr; - llvm::SmallVector innerReturnLikeOps; - TypeEquations &equations; -}; - -class FunctionTypeInferencePass - : public FunctionTypeInferenceBase { -public: - void runOnOperation() override { - FuncOp func = getOperation(); - if (func.getBody().empty()) - return; - - TypeEquations equations; - TypeEquationPopulator p(equations); - (void)p.runOnFunction(func); - LLVM_DEBUG(equations.report(llvm::dbgs())); - - TypeUnifier unifier; - auto substMap = unifier.unifyEquations(equations); - if (!substMap) { - func.emitError() << "type inference failed"; - return signalPassFailure(); - } - - // Apply substitutions. 
- LLVM_DEBUG(llvm::dbgs() << "Unification subst:\n"); - LLVM_DEBUG(for (auto it - : *substMap) { - llvm::dbgs() << " " << it.first << " -> " << *it.second << "\n"; - }); - for (auto it : *substMap) { - TypeNode *varNode = equations.lookupVarOrdinal(it.first); - Type resolvedType = unifier.resolveSubst(it.second, substMap); - if (!resolvedType) { - emitError(varNode->getDef().getLoc()) << "unable to infer type"; - continue; - } - varNode->getDef().setType(resolvedType); - } - - // Now rewrite the function type based on actual types of entry block - // args and the final return op operands. - auto entryBlockTypes = func.getBody().front().getArgumentTypes(); - SmallVector inputTypes(entryBlockTypes.begin(), - entryBlockTypes.end()); - SmallVector resultTypes; - if (p.getLastReturnOp()) { - auto resultRange = p.getLastReturnOp()->getOperandTypes(); - resultTypes.append(resultRange.begin(), resultRange.end()); - } - auto funcType = FunctionType::get(&getContext(), inputTypes, resultTypes); - func.setType(funcType); - } -}; - -} // namespace - -std::unique_ptr> -mlir::NPCOMP::Basicpy::createFunctionTypeInferencePass() { - return std::make_unique(); -} diff --git a/lib/Dialect/CMakeLists.txt b/lib/Dialect/CMakeLists.txt index 017219dfa..826f348ce 100644 --- a/lib/Dialect/CMakeLists.txt +++ b/lib/Dialect/CMakeLists.txt @@ -1,5 +1,3 @@ -add_subdirectory(Basicpy) -add_subdirectory(Numpy) add_subdirectory(Refback) add_subdirectory(Refbackrt) add_subdirectory(TorchConversion) diff --git a/lib/Dialect/Numpy/CMakeLists.txt b/lib/Dialect/Numpy/CMakeLists.txt deleted file mode 100644 index 9f57627c3..000000000 --- a/lib/Dialect/Numpy/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(IR) -add_subdirectory(Transforms) diff --git a/lib/Dialect/Numpy/IR/CMakeLists.txt b/lib/Dialect/Numpy/IR/CMakeLists.txt deleted file mode 100644 index 4e831e804..000000000 --- a/lib/Dialect/Numpy/IR/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -add_npcomp_dialect_library(NPCOMPNumpyDialect - NumpyDialect.cpp - NumpyOps.cpp - - ADDITIONAL_HEADER_DIRS - ${PROJECT_SOURCE_DIR}/include/npcomp/Dialect/Numpy - - DEPENDS - MLIRNumpyOpsIncGen - - LINK_LIBS PUBLIC - NPCOMPBasicpyDialect - MLIRIR -) diff --git a/lib/Dialect/Numpy/IR/NumpyDialect.cpp b/lib/Dialect/Numpy/IR/NumpyDialect.cpp deleted file mode 100644 index 1fae40499..000000000 --- a/lib/Dialect/Numpy/IR/NumpyDialect.cpp +++ /dev/null @@ -1,247 +0,0 @@ -//===- NumpyDialect.cpp - Core numpy dialect --------------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h" - -#include "mlir/IR/DialectImplementation.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Numpy/IR/NumpyOps.h" -#include "npcomp/Typing/Support/CPAIrHelpers.h" -#include "llvm/ADT/TypeSwitch.h" - -using namespace mlir; -using namespace mlir::NPCOMP; -using namespace mlir::NPCOMP::Numpy; - -#include "npcomp/Dialect/Numpy/IR/NumpyOpsDialect.cpp.inc" - -void NumpyDialect::initialize() { - addOperations< -#define GET_OP_LIST -#include "npcomp/Dialect/Numpy/IR/NumpyOps.cpp.inc" - >(); - addTypes(); - getContext()->loadDialect(); -} - -Type NumpyDialect::parseType(DialectAsmParser &parser) const { - StringRef keyword; - if (parser.parseKeyword(&keyword)) - return Type(); - - if (keyword == "any_dtype") - return AnyDtypeType::get(getContext()); - if (keyword == "ndarray") { - // Parse: - // ndarray<*:?> - // ndarray<*:i32> - // ndarary<[1,2,3]:i32> - // Note that this is a different syntax than the built-ins as the dialect - // parser is not general enough to parse a dimension list with an optional - // element type (?). The built-in form is also remarkably ambiguous when - // considering extending it. - Type dtype = Basicpy::UnknownType::get(getContext()); - bool hasShape = false; - llvm::SmallVector shape; - if (parser.parseLess()) - return Type(); - if (succeeded(parser.parseOptionalStar())) { - // Unranked. - } else { - // Parse dimension list. - hasShape = true; - if (parser.parseLSquare()) - return Type(); - for (bool first = true;; first = false) { - if (!first) { - if (failed(parser.parseOptionalComma())) { - break; - } - } - if (succeeded(parser.parseOptionalQuestion())) { - shape.push_back(-1); - continue; - } - - int64_t dim; - auto optionalPr = parser.parseOptionalInteger(dim); - if (optionalPr.hasValue()) { - if (failed(*optionalPr)) - return Type(); - shape.push_back(dim); - continue; - } - break; - } - if (parser.parseRSquare()) { - return Type(); - } - } - - // Parse colon dtype. - if (parser.parseColon()) { - return Type(); - } - - if (failed(parser.parseOptionalQuestion())) { - // Specified dtype. 
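The comment block above documents the accepted `ndarray` textual forms. As a quick cross-reference, here is a hedged sketch of building the corresponding types programmatically; the `MLIRContext *ctx` is assumed, and `-1` encodes a `?` dimension, matching the parser.

```cpp
// Sketch; assumes MLIRContext *ctx and the using-declarations at the top of this file.
auto unknownDtype = Basicpy::UnknownType::get(ctx);

// Roughly "!numpy.ndarray<*:?>": unranked, unknown dtype.
auto unranked = Numpy::NdArrayType::get(unknownDtype, llvm::None);

// Roughly "!numpy.ndarray<[2,?]:f32>": ranked with one dynamic dimension.
llvm::SmallVector<int64_t, 2> shape{2, -1};
auto ranked = Numpy::NdArrayType::get(FloatType::getF32(ctx),
                                      llvm::makeArrayRef(shape));
```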
- if (parser.parseType(dtype)) { - return Type(); - } - } - if (parser.parseGreater()) { - return Type(); - } - - llvm::Optional> optionalShape; - if (hasShape) - optionalShape = shape; - auto ndarray = NdArrayType::get(dtype, optionalShape); - return ndarray; - } - - parser.emitError(parser.getNameLoc(), "unknown numpy type: ") << keyword; - return Type(); -} - -void NumpyDialect::printType(Type type, DialectAsmPrinter &os) const { - TypeSwitch(type) - .Case([&](Type) { os << "any_dtype"; }) - .Case([&](NdArrayType t) { - auto unknownType = Basicpy::UnknownType::get(getContext()); - auto ndarray = type.cast(); - auto shape = ndarray.getOptionalShape(); - auto dtype = ndarray.getDtype(); - os << "ndarray<"; - if (!shape) { - os << "*:"; - } else { - os << "["; - for (auto it : llvm::enumerate(*shape)) { - if (it.index() > 0) - os << ","; - if (it.value() < 0) - os << "?"; - else - os << it.value(); - } - os << "]:"; - } - if (dtype != unknownType) - os.printType(dtype); - else - os << "?"; - os << ">"; - }) - .Default([&](Type) { llvm_unreachable("unexpected 'numpy' type kind"); }); -} - -//----------------------------------------------------------------------------// -// Type and attribute detail -//----------------------------------------------------------------------------// -namespace mlir { -namespace NPCOMP { -namespace Numpy { -namespace detail { - -struct NdArrayTypeStorage : public TypeStorage { - using KeyTy = std::pair>>; - NdArrayTypeStorage(Type dtype, int rank, const int64_t *shapeElements) - : dtype(dtype), rank(rank), shapeElements(shapeElements) {} - bool operator==(const KeyTy &key) const { - return key == KeyTy(dtype, getOptionalShape()); - } - static llvm::hash_code hashKey(const KeyTy &key) { - if (key.second) { - return llvm::hash_combine(key.first, *key.second); - } else { - return llvm::hash_combine(key.first, -1); - } - } - static NdArrayTypeStorage *construct(TypeStorageAllocator &allocator, - const KeyTy &key) { - int rank = -1; - const int64_t *shapeElements = nullptr; - if (key.second.hasValue()) { - auto allocElements = allocator.copyInto(*key.second); - rank = key.second->size(); - shapeElements = allocElements.data(); - } - return new (allocator.allocate()) - NdArrayTypeStorage(key.first, rank, shapeElements); - } - - llvm::Optional> getOptionalShape() const { - if (rank < 0) - return llvm::None; - return ArrayRef(shapeElements, rank); - } - - Type dtype; - int rank; - const int64_t *shapeElements; -}; - -} // namespace detail -} // namespace Numpy -} // namespace NPCOMP -} // namespace mlir - -NdArrayType NdArrayType::get(Type dtype, - llvm::Optional> shape) { - assert(dtype && "dtype cannot be null"); - return Base::get(dtype.getContext(), dtype, shape); -} - -NdArrayType NdArrayType::getFromShapedType(ShapedType shapedType) { - llvm::Optional> shape; - if (shapedType.hasRank()) - shape = shapedType.getShape(); - return get(shapedType.getElementType(), shape); -} - -bool NdArrayType::hasKnownDtype() { - return getDtype() != Basicpy::UnknownType::get(getContext()); -} - -Type NdArrayType::getDtype() { return getImpl()->dtype; } - -llvm::Optional> NdArrayType::getOptionalShape() { - return getImpl()->getOptionalShape(); -} - -TensorType NdArrayType::toTensorType() { - auto shape = getOptionalShape(); - if (shape) { - return RankedTensorType::get(*shape, getDtype()); - } else { - return UnrankedTensorType::get(getDtype()); - } -} - -Typing::CPA::TypeNode * -NdArrayType::mapToCPAType(Typing::CPA::Context &context) { - llvm::Optional dtype; - if 
(hasKnownDtype()) { - // TODO: This should be using a general mechanism for resolving the dtype, - // but we don't have that yet, and for NdArray, these must be primitives - // anyway. - dtype = context.getIRValueType(getDtype()); - } - // Safe to capture an ArrayRef backed by type storage since it is uniqued. - auto optionalShape = getOptionalShape(); - auto irCtor = [optionalShape](Typing::CPA::ObjectValueType *ovt, - llvm::ArrayRef fieldTypes, - MLIRContext *mlirContext, - llvm::Optional) { - assert(fieldTypes.size() == 1); - return NdArrayType::get(fieldTypes.front(), optionalShape); - }; - return Typing::CPA::newArrayType(context, irCtor, - context.getIdentifier("!NdArray"), dtype); -} diff --git a/lib/Dialect/Numpy/IR/NumpyOps.cpp b/lib/Dialect/Numpy/IR/NumpyOps.cpp deleted file mode 100644 index c7b0210d1..000000000 --- a/lib/Dialect/Numpy/IR/NumpyOps.cpp +++ /dev/null @@ -1,152 +0,0 @@ -//===- NumpyOps.cpp - Core numpy dialect ops --------------------*- C++ -*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Dialect/Numpy/IR/NumpyOps.h" -#include "mlir/IR/Builders.h" -#include "mlir/IR/FunctionImplementation.h" -#include "mlir/IR/OpImplementation.h" -#include "mlir/IR/PatternMatch.h" -#include "mlir/IR/TypeUtilities.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h" - -using namespace mlir; -using namespace mlir::NPCOMP; -using namespace mlir::NPCOMP::Numpy; - -//----------------------------------------------------------------------------// -// Type inference -//----------------------------------------------------------------------------// - -/// Adds constraints to relating a unary op that accepts and returns either -/// tensor or ndarray types where the dtype should be the same. -/// Type constraints are added on the dtype, not the outer object type. -static void constrainUnaryDtypeInvariantOp(Typing::CPA::Context &context, - Value source, Value dest, - Operation *op) { - auto &env = context.getCurrentEnvironment(); - auto *sourceTn = - llvm::dyn_cast(env.mapValueToType(source)); - auto *destTn = - llvm::dyn_cast(env.mapValueToType(dest)); - if (sourceTn && destTn && sourceTn->getFieldCount() == 1 && - destTn->getFieldCount() == 1) { - context.getConstraint(sourceTn->getFieldTypes().front(), - destTn->getFieldTypes().front()); - } -} - -void CreateArrayFromTensorOp::addCPAConstraints(Typing::CPA::Context &context) { - constrainUnaryDtypeInvariantOp(context, source(), dest(), *this); -} - -void CopyToTensorOp::addCPAConstraints(Typing::CPA::Context &context) { - constrainUnaryDtypeInvariantOp(context, source(), dest(), *this); -} - -void BuiltinUfuncCallOp::addCPAConstraints(Typing::CPA::Context &context) { - // TODO: This should really be a function call chosen so as to promote - // arguments. For now, though, we just say that the result is constrained - // to the inputs. Note that not all ufuncs transfer types like this. - // We just pretend this is two unary functions that write into the output. 
- for (auto input : inputs()) { - constrainUnaryDtypeInvariantOp(context, input, output(), *this); - } -} - -//----------------------------------------------------------------------------// -// StaticInfoCast -//----------------------------------------------------------------------------// - -bool StaticInfoCastOp::areCastCompatible(mlir::TypeRange inputs, - mlir::TypeRange outputs) { - auto input = inputs[0].cast(); - auto output = outputs[0].cast(); - if (input.getOptionalShape() && output.getOptionalShape()) { - if (failed(verifyCompatibleShape(*input.getOptionalShape(), - *output.getOptionalShape()))) - return false; - } - return input.getDtype() == output.getDtype() || - input.getDtype().isa() || - output.getDtype().isa(); -} - -void StaticInfoCastOp::getCanonicalizationPatterns(RewritePatternSet &patterns, - MLIRContext *context) { - // static_info_cast(oneUse@create_array_from_tensor(%tensor)) - // --> - // create_array_from_tensor(tensor_static_info_cast(%tensor)) - // - // This pattern tends to create more tensor code and less array code. - // This form is considered more canonical because it has same number of ops - // but is more analyzable. - // - // TODO: Consider a world where we numpy.ndarray can track an "immutable" bit - // which makes it tensor-like. Is that useful? - patterns.add(+[](StaticInfoCastOp op, PatternRewriter &rewriter) { - auto createArray = op.getOperand().getDefiningOp(); - if (!createArray || !createArray.getResult().hasOneUse()) - return failure(); - auto tensorCast = rewriter.create( - op.getLoc(), op.getType().cast().toTensorType(), - createArray.getOperand()); - rewriter.replaceOpWithNewOp(op, op.getType(), - tensorCast); - rewriter.eraseOp(createArray); - return success(); - }); -} - -//----------------------------------------------------------------------------// -// TensorStaticInfoCast -//----------------------------------------------------------------------------// - -bool TensorStaticInfoCastOp::areCastCompatible(mlir::TypeRange inputs, - mlir::TypeRange outputs) { - auto input = inputs[0].cast(); - auto output = outputs[0].cast(); - if (input.hasRank() && output.hasRank()) { - if (failed(verifyCompatibleShape(input.getShape(), output.getShape()))) - return false; - } - return input.getElementType() == output.getElementType() || - input.getElementType().isa() || - output.getElementType().isa(); -} - -//----------------------------------------------------------------------------// -// CreateArrayFromTensorOp -//----------------------------------------------------------------------------// - -namespace { -/// Match create_array_from_tensor -> copy_to_tensor and elide in favor -/// of the original tensor. 
-class ElideCreateRedundantArrayFromTensor - : public OpRewritePattern { -public: - using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(CopyToTensorOp op, - PatternRewriter &rewriter) const override { - auto createArrayOp = - dyn_cast_or_null(op.source().getDefiningOp()); - if (createArrayOp && createArrayOp.dest().hasOneUse()) { - rewriter.replaceOp(op, createArrayOp.source()); - } - return success(); - } -}; -} // namespace - -void CopyToTensorOp::getCanonicalizationPatterns(RewritePatternSet &patterns, - MLIRContext *context) { - patterns.add(context); -} - -#define GET_OP_CLASSES -#include "npcomp/Dialect/Numpy/IR/NumpyOps.cpp.inc" diff --git a/lib/Dialect/Numpy/Transforms/CMakeLists.txt b/lib/Dialect/Numpy/Transforms/CMakeLists.txt deleted file mode 100644 index 6128b33ef..000000000 --- a/lib/Dialect/Numpy/Transforms/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -add_npcomp_conversion_library(NPCOMPNumpyPasses - Passes.cpp - PublicFunctionToTensor.cpp - - ADDITIONAL_HEADER_DIRS - ${PROJECT_SOURCE_DIR}/include/npcomp/Dialect/Numpy/Transforms - - DEPENDS - NPCOMPNumpyPassIncGen - - LINK_COMPONENTS - Core - - LINK_LIBS PUBLIC - MLIRIR - MLIRPass - NPCOMPNumpyDialect -) diff --git a/lib/Dialect/Numpy/Transforms/PassDetail.h b/lib/Dialect/Numpy/Transforms/PassDetail.h deleted file mode 100644 index 4ca3a3c5d..000000000 --- a/lib/Dialect/Numpy/Transforms/PassDetail.h +++ /dev/null @@ -1,25 +0,0 @@ -//===- PassDetail.h - Pass details ------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_DIALECT_NUMPY_TRANSFORMS_PASSDETAIL_H -#define NPCOMP_DIALECT_NUMPY_TRANSFORMS_PASSDETAIL_H - -#include "mlir/Pass/Pass.h" - -namespace mlir { -namespace NPCOMP { -namespace Numpy { - -#define GEN_PASS_CLASSES -#include "npcomp/Dialect/Numpy/Transforms/Passes.h.inc" - -} // namespace Numpy -} // namespace NPCOMP -} // end namespace mlir - -#endif // NPCOMP_DIALECT_NUMPY_TRANSFORMS_PASSDETAIL_H diff --git a/lib/Dialect/Numpy/Transforms/Passes.cpp b/lib/Dialect/Numpy/Transforms/Passes.cpp deleted file mode 100644 index 8961a2588..000000000 --- a/lib/Dialect/Numpy/Transforms/Passes.cpp +++ /dev/null @@ -1,20 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Dialect/Numpy/Transforms/Passes.h" - -//===----------------------------------------------------------------------===// -// Pass registration -//===----------------------------------------------------------------------===// - -namespace { -#define GEN_PASS_REGISTRATION -#include "npcomp/Dialect/Numpy/Transforms/Passes.h.inc" -} // end namespace - -void mlir::NPCOMP::registerNumpyPasses() { ::registerPasses(); } diff --git a/lib/Dialect/Numpy/Transforms/PublicFunctionToTensor.cpp b/lib/Dialect/Numpy/Transforms/PublicFunctionToTensor.cpp deleted file mode 100644 index 668c3b86d..000000000 --- a/lib/Dialect/Numpy/Transforms/PublicFunctionToTensor.cpp +++ /dev/null @@ -1,98 +0,0 @@ -//===- PublicFunctionToTensor.cpp - Type inference passes --------*- C++-*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "PassDetail.h" - -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/IR/Builders.h" -#include "mlir/IR/BuiltinOps.h" -#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h" -#include "npcomp/Dialect/Numpy/IR/NumpyOps.h" -#include "npcomp/Dialect/Numpy/Transforms/Passes.h" - -using namespace mlir; -using namespace mlir::NPCOMP::Numpy; - -namespace { - -class PublicFunctionsToTensorPass - : public NumpyPublicFunctionsToTensorBase { - void runOnOperation() override { - auto module = getOperation(); - module.walk([&](FuncOp func) { - if (func.getVisibility() != SymbolTable::Visibility::Public) - return; - if (func.isExternal()) - return; - auto uses = SymbolTable::getSymbolUses(func, module); - if (!uses || uses->begin() != uses->end()) { - func.emitWarning() << "unimplemented: cannot convert ndarray->tensor " - << "signature for public function with uses"; - return; - } - rewriteSignature(func); - }); - } - - void rewriteSignature(FuncOp func) { - auto &entryBlock = func.getBlocks().front(); - auto funcType = func.getType(); - auto loc = func.getLoc(); - - // Rewrite inputs. - auto builder = OpBuilder::atBlockBegin(&entryBlock); - auto inputTypes = llvm::to_vector<4>(funcType.getInputs()); - for (unsigned i = 0; i < inputTypes.size(); ++i) { - auto arrayType = inputTypes[i].dyn_cast(); - if (!arrayType) - continue; - Type tensorType = arrayType.toTensorType(); - BlockArgument argument = entryBlock.getArgument(i); - argument.setType(tensorType); - auto createOp = - builder.create(loc, arrayType, argument); - argument.replaceAllUsesExcept(createOp, - SmallPtrSet{createOp}); - inputTypes[i] = tensorType; - } - - // Rewrite result signature. - auto resultTypes = llvm::to_vector<4>(funcType.getResults()); - for (auto &resultType : resultTypes) { - auto arrayType = resultType.dyn_cast(); - if (arrayType) - resultType = arrayType.toTensorType(); - } - - // Update signature. - funcType = - FunctionType::get(funcType.getContext(), inputTypes, resultTypes); - func.setType(funcType); - - // Rewrite all return terminators. 
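To summarize the signature rewrite above, a hedged sketch of what the pass does and how it could be driven; the textual IR is approximated from the op names in this file, and the `ctx`/`module` values are assumed to exist.

```cpp
// Approximate effect on a public function (for orientation only):
//   before: func @f(%arg0: !numpy.ndarray<[4]:f32>) -> !numpy.ndarray<[4]:f32>
//   after : func @f(%arg0: tensor<4xf32>) -> tensor<4xf32>
//           with numpy.create_array_from_tensor inserted at entry and
//           numpy.copy_to_tensor feeding each return.
// Driving the pass over a module (assumes MLIRContext *ctx and ModuleOp module):
#include "mlir/Pass/PassManager.h"

mlir::PassManager pm(ctx);
pm.addPass(mlir::NPCOMP::Numpy::createPublicFunctionsToTensorPass());
if (mlir::failed(pm.run(module))) {
  // Handle the failure (e.g. propagate an error to the caller).
}
```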
- func.walk([&](ReturnOp term) { - OpBuilder builder(term); - for (unsigned i = 0; i < term.getNumOperands(); ++i) { - Value operand = term.getOperand(i); - auto arrayType = operand.getType().dyn_cast(); - if (!arrayType) - continue; - Type tensorType = arrayType.toTensorType(); - auto copyOp = builder.create(loc, tensorType, operand); - term.setOperand(i, copyOp); - } - }); - } -}; - -} // namespace - -std::unique_ptr> -mlir::NPCOMP::Numpy::createPublicFunctionsToTensorPass() { - return std::make_unique(); -} diff --git a/lib/InitAll.cpp b/lib/InitAll.cpp index 7bfae1aaa..012dcfda1 100644 --- a/lib/InitAll.cpp +++ b/lib/InitAll.cpp @@ -13,22 +13,15 @@ #include "npcomp/Backend/Common/Passes.h" #include "npcomp/Backend/IREE/Passes.h" #include "npcomp/Conversion/Passes.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Basicpy/Transforms/Passes.h" -#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h" -#include "npcomp/Dialect/Numpy/Transforms/Passes.h" #include "npcomp/Dialect/Refback/IR/RefbackDialect.h" #include "npcomp/Dialect/Refbackrt/IR/RefbackrtDialect.h" #include "npcomp/Dialect/TorchConversion/IR/TorchConversionDialect.h" #include "npcomp/Dialect/TorchConversion/Transforms/Passes.h" #include "npcomp/RefBackend/RefBackend.h" -#include "npcomp/Typing/Transforms/Passes.h" void mlir::NPCOMP::registerAllDialects(mlir::DialectRegistry ®istry) { // clang-format off - registry.insert(); @@ -38,10 +31,7 @@ void mlir::NPCOMP::registerAllDialects(mlir::DialectRegistry ®istry) { void mlir::NPCOMP::registerAllPasses() { mlir::NPCOMP::registerRefBackendPasses(); mlir::NPCOMP::registerConversionPasses(); - mlir::NPCOMP::registerBasicpyPasses(); - mlir::NPCOMP::registerNumpyPasses(); mlir::NPCOMP::registerTorchConversionPasses(); - mlir::NPCOMP::registerTypingPasses(); mlir::NPCOMP::IREEBackend::registerIREEBackendPasses(); mlir::NPCOMP::CommonBackend::registerCommonBackendPasses(); } diff --git a/lib/Typing/Analysis/CMakeLists.txt b/lib/Typing/Analysis/CMakeLists.txt deleted file mode 100644 index fd2a3eae4..000000000 --- a/lib/Typing/Analysis/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory(CPA) diff --git a/lib/Typing/Analysis/CPA/Algorithm.cpp b/lib/Typing/Analysis/CPA/Algorithm.cpp deleted file mode 100644 index ea2194b79..000000000 --- a/lib/Typing/Analysis/CPA/Algorithm.cpp +++ /dev/null @@ -1,129 +0,0 @@ -//===- Algorith.cpp - Main algorithm --------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Typing/Analysis/CPA/Algorithm.h" - -#include "mlir/IR/Diagnostics.h" -#include "llvm/Support/Debug.h" - -#define DEBUG_TYPE "cpa-type-inference" - -using namespace mlir; -using namespace mlir::NPCOMP::Typing::CPA; - -//------------------------------------------------------------------------------ -// PropagationWorklist -//------------------------------------------------------------------------------ - -PropagationWorklist::PropagationWorklist(Environment &env) : env(env) { - auto &contents = env.getConstraints(); - currentConstraints.reserve(contents.size() * 2); - for (auto *c : contents) { - currentConstraints.insert(c); - } -} - -bool PropagationWorklist::commit() { - bool hadNew = newConstraintCount > 0; - newConstraintCount = 0; - return hadNew; -} - -void PropagationWorklist::propagateTransitivity() { - // Prepare for join. - constexpr size_t N = 8; - llvm::DenseMap> varToValueType; - llvm::DenseMap> varToAny; - for (auto *c : currentConstraints) { - auto *lhsVar = llvm::dyn_cast(c->getFrom()); - auto *rhsVar = llvm::dyn_cast(c->getTo()); - - if (lhsVar) { - varToAny[lhsVar].push_back(c->getTo()); - } - if (rhsVar) { - if (auto *vt = llvm::dyn_cast(c->getFrom())) { - varToValueType[rhsVar].push_back(vt); - } - } - } - - // Expand join. - for (auto vtIt : varToValueType) { - auto &lhsSet = vtIt.second; - auto anyIt = varToAny.find(vtIt.first); - if (anyIt == varToAny.end()) - continue; - auto &rhsSet = anyIt->second; - - for (ValueType *lhsItem : lhsSet) { - for (TypeNode *rhsItem : rhsSet) { - Constraint *newC = env.getContext().getConstraint(lhsItem, rhsItem); - if (currentConstraints.insert(newC).second) { - LLVM_DEBUG(llvm::dbgs() << "-->ADD TRANS CONSTRAINT: "; - newC->print(env.getContext(), llvm::dbgs()); - llvm::dbgs() << "\n";); - newConstraintCount += 1; - } - } - } - } -} - -//------------------------------------------------------------------------------ -// GreedyTypeNodeVarResolver -//------------------------------------------------------------------------------ - -ValueType * -GreedyTypeNodeVarResolver::unionCandidateTypes(const ValueTypeSet &candidates) { - if (candidates.empty()) { - (void)mlir::emitOptionalError(loc, "no candidate types were identified"); - return nullptr; - } - if (candidates.size() != 1) { - (void)mlir::emitOptionalError(loc, - "ambiguous candidate types were identified"); - return nullptr; - } - - return *candidates.begin(); -} - -LogicalResult GreedyTypeNodeVarResolver::analyzeTypeNode(TypeNode *tn) { - TypeVarSet newVars; - tn->collectDependentTypeVars(context, newVars); - if (newVars.empty()) - return success(); - - // Breadth-first resolution of vars (that do not depend on other vars). - ValueTypeSet pendingValueTypes; - for (TypeVar *newTv : newVars) { - if (!allVars.insert(newTv).second) - continue; - if (mappings.count(newTv) == 1) - continue; - - // Known mappings to this TypeVar. - auto &existingMembers = context.getMembers(newTv); - ValueTypeSet members(existingMembers.begin(), existingMembers.end()); - - ValueType *concreteVt = unionCandidateTypes(members); - if (!concreteVt) - return failure(); - mappings[newTv] = concreteVt; - pendingValueTypes.insert(concreteVt); - } - - // Recursively analyze any newly discovered concrete types. 
- for (ValueType *nextValueType : pendingValueTypes) { - if (failed(analyzeTypeNode(nextValueType))) - return failure(); - } - - return success(); -} diff --git a/lib/Typing/Analysis/CPA/CMakeLists.txt b/lib/Typing/Analysis/CPA/CMakeLists.txt deleted file mode 100644 index 542768bbd..000000000 --- a/lib/Typing/Analysis/CPA/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -add_npcomp_library(NPCOMPTypingCPA - Algorithm.cpp - Interfaces.cpp - Types.cpp - - DEPENDS - NPCOMPTypingCPAInterfacesIncGen - - LINK_LIBS - PUBLIC - MLIRIR -) diff --git a/lib/Typing/Analysis/CPA/Types.cpp b/lib/Typing/Analysis/CPA/Types.cpp deleted file mode 100644 index f8f225a68..000000000 --- a/lib/Typing/Analysis/CPA/Types.cpp +++ /dev/null @@ -1,289 +0,0 @@ -//===- Support.cpp - Support types and utilities for CPA ------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Typing/Analysis/CPA/Types.h" - -#include "mlir/IR/Operation.h" -#include "npcomp/Typing/Analysis/CPA/Interfaces.h" - -using namespace mlir::NPCOMP::Typing::CPA; - -ObjectBase::~ObjectBase() = default; - -//===----------------------------------------------------------------------===// -// Data structures -//===----------------------------------------------------------------------===// - -const ConstraintSet &ConstraintSet::getEmptySet() { - static ConstraintSet s; - return s; -} - -const TypeVarSet &TypeVarSet::getEmptySet() { - static TypeVarSet s; - return s; -} - -const TypeNodeSet &TypeNodeSet::getEmptySet() { - static TypeNodeSet s; - return s; -} - -const ValueTypeSet &ValueTypeSet::getEmptySet() { - static ValueTypeSet s; - return s; -} - -//===----------------------------------------------------------------------===// -// Environment -//===----------------------------------------------------------------------===// - -Environment::Environment(Context &context) : context(context) {} - -TypeNode *Environment::mapValueToType(Value value) { - TypeNode *&cpaType = valueTypeMap[value]; - if (cpaType) - return cpaType; - - cpaType = context.mapIrType(value.getType()); - assert(cpaType && "currently every IR type must map to a CPA type"); - - // Do accounting for type vars. - if (auto *tv = llvm::dyn_cast(cpaType)) { - typeVars.insert(tv); - // TODO: Tie to value. 
- } - - return cpaType; -} - -//===----------------------------------------------------------------------===// -// TypeNode and descendent methods -//===----------------------------------------------------------------------===// - -void TypeNode::collectDependentTypeVars(Context &context, - TypeVarSet &typeVars) {} - -mlir::Type TypeNode::constructIrType(Context &context, - const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc) { - (void)mlir::emitOptionalError(loc, - "base class cannot construct concrete types"); - return {}; -} - -void TypeVar::collectDependentTypeVars(Context &context, TypeVarSet &typeVars) { - typeVars.insert(this); -} - -mlir::Type TypeVar::constructIrType(Context &context, const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc) { - auto *resolvedTypeNode = mapping.lookup(this); - if (!resolvedTypeNode) { - if (loc) { - mlir::emitError(*loc) - << "type variable " << getOrdinal() << " was not assigned a type"; - } - return {}; - } - return resolvedTypeNode->constructIrType(context, mapping, mlirContext, loc); -} - -mlir::Type IRValueType::constructIrType(Context &context, - const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc) { - return irType; -} - -void ObjectValueType::collectDependentTypeVars(Context &context, - TypeVarSet &typeVars) { - for (auto *fieldType : getFieldTypes()) { - fieldType->collectDependentTypeVars(context, typeVars); - } -} - -mlir::Type ObjectValueType::constructIrType(Context &context, - const TypeVarMap &mapping, - MLIRContext *mlirContext, - llvm::Optional loc) { - llvm::SmallVector fieldIrTypes; - for (TypeNode *fieldType : getFieldTypes()) { - auto irType = - fieldType->constructIrType(context, mapping, mlirContext, loc); - if (!irType) - return {}; - fieldIrTypes.push_back(irType); - } - return irCtor(this, fieldIrTypes, mlirContext, loc); -} - -//===----------------------------------------------------------------------===// -// Context -//===----------------------------------------------------------------------===// - -Context::Context(IrTypeMapHook irTypeMapHook) : irTypeMapHook(irTypeMapHook) { - environmentStack.emplace_back(std::make_unique(*this)); - currentEnvironment = environmentStack.back().get(); -} - -TypeNode *Context::mapIrType(::mlir::Type irType) { - // First, see if the type knows how to map itself. - assert(irType); - if (auto mapper = irType.dyn_cast()) { - auto *cpaType = mapper.mapToCPAType(*this); - if (cpaType) - return cpaType; - } - - if (irTypeMapHook) { - auto *cpaType = irTypeMapHook(*this, irType); - if (cpaType) - return cpaType; - } - - // Fallback to an IR type. - return getIRValueType(irType); -} - -void Context::addConstraintToGraph(Constraint *c) { - fwdNodeToConstraintMap[c->getFrom()].insert(c); - fwdConstraintToNodeMap[c].insert(c->getTo()); - bakNodeToConstraintMap[c->getTo()].insert(c); - pendingConstraints.insert(c); - propagateConstraints(); -} - -void Context::propagateConstraints() { - // Process pending constraints until converges. - while (!pendingConstraints.empty()) { - // Swap for stable iteration. 
- assert(pendingConstraintWorklist.empty()); - pendingConstraintWorklist.swap(pendingConstraints); - - for (auto *constraint : pendingConstraintWorklist) { - ValueTypeSet &fromContents = typeNodeMembers[constraint->getFrom()]; - ValueTypeSet &toContents = typeNodeMembers[constraint->getTo()]; - - bool modified = false; - for (ValueType *fromItem : fromContents) { - modified = toContents.insert(fromItem).second || modified; - } - // If the 'from' is a ValueType, consider it part of its own set. - if (auto *fromIdentity = - llvm::dyn_cast(constraint->getFrom())) { - modified = toContents.insert(fromIdentity).second; - } - - // If the 'to' item was modified, propagate any of its constraints. - if (modified) { - ConstraintSet &toPropagate = - fwdNodeToConstraintMap[constraint->getTo()]; - for (Constraint *newConstraint : toPropagate) { - pendingConstraints.insert(newConstraint); - } - } - } - pendingConstraintWorklist.clear(); - } -} - -//===----------------------------------------------------------------------===// -// Printing -//===----------------------------------------------------------------------===// - -void Identifier::print(raw_ostream &os, bool brief) { - os << "'" << value << "'"; -} - -void TypeNode::print(Context &context, raw_ostream &os, bool brief) { - os << ""; -} - -void TypeVar::print(Context &context, raw_ostream &os, bool brief) { - os << "TypeVar(" << ordinal; - if (!brief) { - auto &members = context.getMembers(this); - if (members.empty()) { - os << " => EMPTY"; - } else { - os << " => [\n"; - for (ValueType *member : members) { - os << " "; - member->print(context, os, true); - os << "\n"; - } - os << "]"; - } - } - os << ")"; -} - -void CastType::print(Context &context, raw_ostream &os, bool brief) { - os << "cast(" << *typeIdentifier << ", "; - typeVar->print(context, os, true); - os << ")"; -} - -void ReadType::print(Context &context, raw_ostream &os, bool brief) { - os << "read("; - type->print(context, os, true); - os << ")"; -} - -void WriteType::print(Context &context, raw_ostream &os, bool brief) { - os << "write("; - type->print(context, os, true); - os << ")"; -} - -void IRValueType::print(Context &context, raw_ostream &os, bool brief) { - os << "irtype(" << irType << ")"; -} - -void ObjectValueType::print(Context &context, raw_ostream &os, bool brief) { - os << "object(" << *typeIdentifier << ",["; - bool first = true; - for (auto it : llvm::zip(getFieldIdentifiers(), getFieldTypes())) { - if (!first) - os << ", "; - else - first = false; - os << *std::get<0>(it) << ":"; - auto *ft = std::get<1>(it); - if (ft) - ft->print(context, os, true); - else - os << "NULL"; - } - os << "])"; -} - -void Constraint::print(Context &context, raw_ostream &os, bool brief) { - from->print(context, os, true); - os << " <: "; - to->print(context, os, true); -} - -void ConstraintSet::print(Context &context, raw_ostream &os, bool brief) { - for (auto it : llvm::enumerate(*this)) { - os << it.index() << ": "; - it.value()->print(context, os, brief); - os << "\n"; - } -} - -void TypeVarSet::print(Context &context, raw_ostream &os, bool brief) { - for (auto it : *this) { - os << it->getOrdinal() << ": "; - it->print(context, os, brief); - os << "\n"; - } -} diff --git a/lib/Typing/CMakeLists.txt b/lib/Typing/CMakeLists.txt deleted file mode 100644 index c6334a006..000000000 --- a/lib/Typing/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -add_subdirectory(Analysis) -add_subdirectory(Transforms) -add_subdirectory(Support) diff --git a/lib/Typing/Support/CMakeLists.txt 
b/lib/Typing/Support/CMakeLists.txt deleted file mode 100644 index de134726f..000000000 --- a/lib/Typing/Support/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -add_npcomp_library(NPCOMPTypingCPASupport - CPAIrHelpers.cpp - - LINK_LIBS - PUBLIC - MLIRIR - NPCOMPTypingCPA - NPCOMPBasicpyDialect -) diff --git a/lib/Typing/Support/CPAIrHelpers.cpp b/lib/Typing/Support/CPAIrHelpers.cpp deleted file mode 100644 index 22163aaba..000000000 --- a/lib/Typing/Support/CPAIrHelpers.cpp +++ /dev/null @@ -1,77 +0,0 @@ -//===- IrHelpers.cpp - Helpers for bridging analysis and IR types ---------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Typing/Support/CPAIrHelpers.h" - -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "llvm/ADT/Optional.h" - -using namespace mlir; -using namespace mlir::NPCOMP::Basicpy; -using namespace mlir::NPCOMP::Typing::CPA; - -namespace CPA = mlir::NPCOMP::Typing::CPA; - -ObjectValueType::IrTypeConstructor static createTensorLikeIrTypeConstructor( - TensorType tt) { - return [tt](ObjectValueType *ovt, llvm::ArrayRef fieldTypes, - MLIRContext *mlirContext, - llvm::Optional loc) -> mlir::Type { - if (auto ranked = tt.dyn_cast()) { - return RankedTensorType::get(tt.getShape(), fieldTypes.front()); - } else { - // Unranked. - return UnrankedTensorType::get(fieldTypes.front()); - } - }; -} - -ObjectValueType *CPA::newArrayType(Context &context, - ObjectValueType::IrTypeConstructor irCtor, - Identifier *typeIdentifier, - llvm::Optional elementType) { - TypeNode *concreteElementType; - if (elementType) { - concreteElementType = *elementType; - } else { - concreteElementType = context.newTypeVar(); - } - auto arrayElementIdent = context.getIdentifier("e"); - return context.newObjectValueType(irCtor, typeIdentifier, {arrayElementIdent}, - {concreteElementType}); -} - -TypeNode *CPA::getArrayElementType(ObjectValueType *arrayType) { - assert(arrayType->getFieldCount() == 1 && - "expected to be an arity 1 array type"); - return arrayType->getFieldTypes().front(); -} - -ObjectValueType *CPA::createTensorLikeArrayType(Context &context, - TensorType tensorType) { - auto elTy = tensorType.getElementType(); - llvm::Optional dtype; - if (elTy != UnknownType::get(tensorType.getContext())) { - dtype = context.mapIrType(elTy); - } - return newArrayType(context, createTensorLikeIrTypeConstructor(tensorType), - context.getIdentifier("!Tensor"), dtype); -} - -static TypeNode *defaultTypeMapHook(Context &context, mlir::Type irType) { - // Handle core types that we can't define an interface on. 
- if (auto tensorType = irType.dyn_cast()) { - return createTensorLikeArrayType(context, tensorType); - } - - return nullptr; -} - -Context::IrTypeMapHook CPA::createDefaultTypeMapHook() { - return defaultTypeMapHook; -} diff --git a/lib/Typing/Transforms/CMakeLists.txt b/lib/Typing/Transforms/CMakeLists.txt deleted file mode 100644 index 82c5374cd..000000000 --- a/lib/Typing/Transforms/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -add_npcomp_conversion_library(NPCOMPTypingPasses - Passes.cpp - CPATypeInference.cpp - - ADDITIONAL_HEADER_DIRS - ${PROJECT_SOURCE_DIR}/include/npcomp/Typing/Transforms - - DEPENDS - NPCOMPTypingTransformsPassIncGen - - LINK_COMPONENTS - Core - - LINK_LIBS PUBLIC - MLIRIR - MLIRPass - NPCOMPTypingCPASupport - NPCOMPTypingCPA -) diff --git a/lib/Typing/Transforms/CPATypeInference.cpp b/lib/Typing/Transforms/CPATypeInference.cpp deleted file mode 100644 index 688f6fd6d..000000000 --- a/lib/Typing/Transforms/CPATypeInference.cpp +++ /dev/null @@ -1,309 +0,0 @@ -//===- CPATypeInference.cpp - Type inference passes -----------------*- -// C++-*-===// -// -// This file is licensed under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "PassDetail.h" - -#include "mlir/Dialect/SCF/SCF.h" -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/Interfaces/ControlFlowInterfaces.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h" -#include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h" -#include "npcomp/Typing/Analysis/CPA/Algorithm.h" -#include "npcomp/Typing/Analysis/CPA/Interfaces.h" -#include "npcomp/Typing/Analysis/CPA/Types.h" -#include "npcomp/Typing/Support/CPAIrHelpers.h" -#include "npcomp/Typing/Transforms/Passes.h" -#include "llvm/Support/Debug.h" - -#define DEBUG_TYPE "cpa-type-inference" - -using namespace llvm; -using namespace mlir; -using namespace mlir::NPCOMP::Basicpy; -using namespace mlir::NPCOMP::Typing; - -static void printReport(CPA::Environment &env, MLIRContext &mlirContext, - llvm::raw_ostream &os) { - auto &cpaContext = env.getContext(); - os << "CONSTRAINTS:\n"; - os << "------------\n"; - env.getConstraints().print(cpaContext, os); - - os << "\nTYPEVARS:\n"; - os << "---------\n"; - env.getTypeVars().print(cpaContext, os); - - os << "\nVALUE->TYPE NODE MAPPING:"; - os << "\n-------------------------\n"; - for (auto &it : env.getValueTypeMap()) { - auto irValue = it.first; - auto typeNode = it.second; - CPA::GreedyTypeNodeVarResolver resolver(cpaContext, mlirContext, - irValue.getLoc()); - if (failed(resolver.analyzeTypeNode(typeNode))) { - os << "! "; - typeNode->print(cpaContext, os, false); - os << " -> " << irValue; - os << "\n"; - continue; - } - - if (resolver.getMappings().empty()) { - // Not generic. - os << "= "; - typeNode->print(cpaContext, os, false); - os << " -> " << irValue; - os << "\n"; - continue; - } - - // Generic. 
- os << "*"; - auto newIrType = typeNode->constructIrType( - cpaContext, resolver.getMappings(), &mlirContext, irValue.getLoc()); - if (!newIrType) { - os << "!"; - } else { - os << " " << newIrType << ":"; - } - - os << " "; - typeNode->print(cpaContext, os, false); - os << " -> " << irValue; - os << "\n"; - } -} - -namespace { - -class InitialConstraintGenerator { -public: - InitialConstraintGenerator(CPA::Environment &env) : env(env) {} - - /// If a return op was visited, this will be one of them. - Operation *getLastReturnOp() { return funcReturnOp; } - - /// Gets any ReturnLike ops that do not return from the outer function. - /// This is used to fixup parent SCF ops and the like. - llvm::SmallVectorImpl &getInnerReturnLikeOps() { - return innerReturnLikeOps; - } - - CPA::TypeNode *resolveValueType(Value value) { - return env.mapValueToType(value); - } - - void addSubtypeConstraint(Value superValue, Value subValue, - Operation *contextOp) { - auto superVt = resolveValueType(superValue); - auto subVt = resolveValueType(subValue); - env.getContext().getConstraint(superVt, subVt); - } - - LogicalResult runOnFunction(FuncOp funcOp) { - // Iterate and create type nodes for entry block arguments, as these - // must be resolved no matter what. - if (funcOp.getBody().empty()) - return success(); - - auto &entryBlock = funcOp.getBody().front(); - for (auto blockArg : entryBlock.getArguments()) { - resolveValueType(blockArg); - } - - // Then walk ops, creating equations. - LLVM_DEBUG(llvm::dbgs() << "POPULATE CHILD OPS:\n"); - auto result = funcOp.walk([&](Operation *childOp) -> WalkResult { - if (childOp == funcOp) - return WalkResult::advance(); - LLVM_DEBUG(llvm::dbgs() << " + POPULATE: " << *childOp << "\n"); - // Use the op interface. - if (auto opInt = - dyn_cast(childOp)) { - opInt.addCPAConstraints(env.getContext()); - return WalkResult::advance(); - } - // Special op handling. - // Many of these (that are not standard ops) should become op - // interfaces. - // -------------------- - if (auto op = dyn_cast(childOp)) { - // Note that the condition is always i1 and not subject to type - // inference. - // addSubtypeConstraint(op.true_value(), op.false_value(), op); - // addSubtypeConstraint(op.false_value(), op.true_value(), op); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // Note that the result is always i1 and not subject to type - // inference. - resolveValueType(op.operand()); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // Note that the condition is always i1 and not subject to type - // inference. - for (auto result : op.getResults()) { - resolveValueType(result); - } - return WalkResult::advance(); - } - if (auto yieldOp = dyn_cast(childOp)) { - auto scfParentOp = yieldOp->getParentOp(); - if (scfParentOp->getNumResults() != yieldOp.getNumOperands()) { - yieldOp.emitWarning() - << "cannot run type inference on yield due to arity mismatch"; - return WalkResult::advance(); - } - for (auto it : - llvm::zip(scfParentOp->getResults(), yieldOp.getOperands())) { - addSubtypeConstraint(std::get<1>(it), std::get<0>(it), yieldOp); - } - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - addSubtypeConstraint(op.operand(), op.result(), op); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // TODO: This should really be applying arithmetic promotion, not - // strict equality. 
- addSubtypeConstraint(op.left(), op.result(), op); - addSubtypeConstraint(op.right(), op.result(), op); - return WalkResult::advance(); - } - if (auto op = dyn_cast(childOp)) { - // TODO: This should really be applying arithmetic promotion, not - // strict equality. - addSubtypeConstraint(op.left(), op.right(), op); - addSubtypeConstraint(op.right(), op.left(), op); - return WalkResult::advance(); - } - - // Fallback trait based equations. - // ---------------------- - // Ensure that constant nodes get assigned a constant type. - if (childOp->hasTrait()) { - resolveValueType(childOp->getResult(0)); - return WalkResult::advance(); - } - // Function returns must all have the same types. - if (childOp->hasTrait()) { - if (childOp->getParentOp() == funcOp) { - if (funcReturnOp) { - if (funcReturnOp->getNumOperands() != childOp->getNumOperands()) { - childOp->emitOpError() << "different arity of function returns"; - return WalkResult::interrupt(); - } - for (auto it : llvm::zip(funcReturnOp->getOperands(), - childOp->getOperands())) { - // addSubtypeConstraint(std::get<0>(it), std::get<1>(it), - // childOp); - addSubtypeConstraint(std::get<1>(it), std::get<0>(it), childOp); - } - } - funcReturnOp = childOp; - return WalkResult::advance(); - } else { - innerReturnLikeOps.push_back(childOp); - } - } - - childOp->emitRemark() << "unhandled op in type inference"; - - return WalkResult::advance(); - }); - - return success(result.wasInterrupted()); - } - -private: - // The last encountered ReturnLike op. - Operation *funcReturnOp = nullptr; - llvm::SmallVector innerReturnLikeOps; - CPA::Environment &env; -}; - -class CPAFunctionTypeInferencePass - : public CPAFunctionTypeInferenceBase { -public: - void runOnOperation() override { - FuncOp func = getOperation(); - if (func.getBody().empty()) - return; - - CPA::Context cpaContext(CPA::createDefaultTypeMapHook()); - auto &env = cpaContext.getCurrentEnvironment(); - - InitialConstraintGenerator p(env); - (void)p.runOnFunction(func); - - CPA::PropagationWorklist prop(env); - do { - prop.propagateTransitivity(); - } while (prop.commit()); - - LLVM_DEBUG(printReport(env, getContext(), llvm::dbgs())); - - // Apply updates. - // TODO: This is far too naive and is basically only valid for single-block - // functions that are not called. Generalize it. - for (auto &it : env.getValueTypeMap()) { - auto irValue = it.first; - auto typeNode = it.second; - auto loc = irValue.getLoc(); - CPA::GreedyTypeNodeVarResolver resolver(cpaContext, getContext(), - irValue.getLoc()); - if (failed(resolver.analyzeTypeNode(typeNode))) { - mlir::emitRemark(loc) - << "type inference did not converge to an " - << "unambiguous type (this is a terribly unacceptable level of " - << "detail in an error message)"; - return signalPassFailure(); - } - - if (resolver.getMappings().empty()) { - // The type is not generic/unknown, so it does not need to be updated. - continue; - } - - auto newType = typeNode->constructIrType( - cpaContext, resolver.getMappings(), &getContext(), loc); - if (!newType) { - auto diag = mlir::emitRemark(loc); - diag << "type inference converged but a concrete IR " - << "type could not be constructed"; - return signalPassFailure(); - } - irValue.setType(newType); - } - - // Now rewrite the function type based on actual types of entry block - // args and the final return op operands. - // Again, this is just a toy that will work for very simple, global - // functions. 
- auto entryBlockTypes = func.getBody().front().getArgumentTypes(); - SmallVector inputTypes(entryBlockTypes.begin(), - entryBlockTypes.end()); - SmallVector resultTypes; - if (p.getLastReturnOp()) { - auto resultRange = p.getLastReturnOp()->getOperandTypes(); - resultTypes.append(resultRange.begin(), resultRange.end()); - } - auto funcType = FunctionType::get(&getContext(), inputTypes, resultTypes); - func.setType(funcType); - } -}; - -} // namespace - -std::unique_ptr> -mlir::NPCOMP::Typing::createCPAFunctionTypeInferencePass() { - return std::make_unique(); -} diff --git a/lib/Typing/Transforms/PassDetail.h b/lib/Typing/Transforms/PassDetail.h deleted file mode 100644 index ad460ad3e..000000000 --- a/lib/Typing/Transforms/PassDetail.h +++ /dev/null @@ -1,25 +0,0 @@ -//===- PassDetail.h - Pass details ------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_TYPING_TRANSFORMS_PASSDETAIL_H -#define NPCOMP_TYPING_TRANSFORMS_PASSDETAIL_H - -#include "mlir/Pass/Pass.h" - -namespace mlir { -namespace NPCOMP { -namespace Typing { - -#define GEN_PASS_CLASSES -#include "npcomp/Typing/Transforms/Passes.h.inc" - -} // namespace Typing -} // namespace NPCOMP -} // end namespace mlir - -#endif // NPCOMP_TYPING_TRANSFORMS_PASSDETAIL_H diff --git a/lib/Typing/Transforms/Passes.cpp b/lib/Typing/Transforms/Passes.cpp deleted file mode 100644 index 2b04326df..000000000 --- a/lib/Typing/Transforms/Passes.cpp +++ /dev/null @@ -1,20 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "npcomp/Typing/Transforms/Passes.h" - -//===----------------------------------------------------------------------===// -// Pass registration -//===----------------------------------------------------------------------===// - -namespace { -#define GEN_PASS_REGISTRATION -#include "npcomp/Typing/Transforms/Passes.h.inc" -} // end namespace - -void mlir::NPCOMP::registerTypingPasses() { ::registerPasses(); } diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 36740e331..b73c0df94 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,8 +1,6 @@ include(AddMLIRPython) include(MLIRDetectPythonEnv) -# Specifies that all MLIR packages are co-located under npcomp. -# TODO: Add an upstream cmake param for this vs having a global here. add_compile_definitions("MLIR_PYTHON_PACKAGE_PREFIX=npcomp.") ################################################################################ @@ -35,14 +33,9 @@ declare_mlir_python_sources(NPCOMPPythonSources.Core ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/npcomp" SOURCES build.py - decorators.py - exporter.py - smoketest.py - types.py + __init__.py SOURCES_GLOB compiler/*.py - frontends/*.py - torch/*.py utils/*.py ) @@ -74,10 +67,6 @@ declare_mlir_python_extension(NPCOMPPythonExtensions.Core ${_addl_extension_sources} EMBED_CAPI_LINK_LIBS NPCOMPCAPI - # XXX: It's hacky to hardcode this here. 
We just need TorchMLIRCAPI built - # into the NPCOMPPythonCAPI shlib. - # If we follow what iree-dialects is doing, we can avoid this I think. - TorchMLIRCAPI PRIVATE_LINK_LIBS LLVMSupport ) @@ -86,26 +75,9 @@ declare_mlir_python_extension(NPCOMPPythonExtensions.Core # Declare dialects ################################################################################ -declare_mlir_dialect_python_bindings( - ADD_TO_PARENT NPCOMPPythonSources.Dialects - ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/npcomp" - TD_FILE dialects/BasicpyBind.td - SOURCES dialects/basicpy.py - DIALECT_NAME basicpy) - -declare_mlir_dialect_python_bindings( - ADD_TO_PARENT NPCOMPPythonSources.Dialects - ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/npcomp" - TD_FILE dialects/NumpyBind.td - SOURCES dialects/numpy.py - DIALECT_NAME numpy) - -declare_mlir_dialect_python_bindings( - ADD_TO_PARENT NPCOMPPythonSources.Dialects - ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/npcomp" - TD_FILE dialects/TorchBind.td - SOURCES dialects/torch.py - DIALECT_NAME torch) +# Declare dialects with `declare_mlir_dialect_python_bindings`. +# Currently there are none, because the interesting ones to bind have been +# removed or moved into torch-mlir. ################################################################################ # Build composite binaries diff --git a/python/NpcompModule.cpp b/python/NpcompModule.cpp index f513e3c60..8b4b8fc31 100644 --- a/python/NpcompModule.cpp +++ b/python/NpcompModule.cpp @@ -15,34 +15,11 @@ #include "mlir-c/BuiltinAttributes.h" #include "mlir-c/BuiltinTypes.h" #include "mlir-c/Diagnostics.h" -#include "npcomp-c/BasicpyTypes.h" #include "npcomp-c/InitLLVM.h" -#include "npcomp-c/NumpyTypes.h" #include "npcomp-c/Registration.h" namespace { -MlirType shapedToNdArrayArrayType(MlirType shaped_type) { - if (!mlirTypeIsAShaped(shaped_type)) { - throw py::raiseValueError("type is not a shaped type"); - } - return npcompNumpyNdArrayTypeGetFromShaped(shaped_type); -} - -MlirType ndarrayToTensorType(MlirType ndarray_type) { - if (!npcompTypeIsANumpyNdArray(ndarray_type)) { - throw py::raiseValueError("type is not an ndarray type"); - } - return npcompNumpyNdArrayTypeToTensor(ndarray_type); -} - -MlirType slotObjectType(MlirContext context, const std::string &className, - const std::vector &slotTypes) { - MlirStringRef classNameSr{className.data(), className.size()}; - return ::npcompBasicPySlotObjectTypeGet(context, classNameSr, - slotTypes.size(), slotTypes.data()); -} - // TODO: Move this upstream. void emitError(MlirLocation loc, std::string message) { ::mlirEmitError(loc, message.c_str()); @@ -56,9 +33,6 @@ PYBIND11_MODULE(_npcomp, m) { ::npcompInitializeLLVMCodegen(); m.def("register_all_dialects", ::npcompRegisterAllDialects); - m.def("shaped_to_ndarray_type", shapedToNdArrayArrayType); - m.def("ndarray_to_tensor_type", ndarrayToTensorType); - m.def("slot_object_type", slotObjectType); m.def("emit_error", emitError); // Optional backend modules. diff --git a/python/npcomp/compiler/numpy/extensions/numpy/__init__.py b/python/npcomp/__init__.py similarity index 78% rename from python/npcomp/compiler/numpy/extensions/numpy/__init__.py rename to python/npcomp/__init__.py index 3852bb585..a7de032d7 100644 --- a/python/npcomp/compiler/numpy/extensions/numpy/__init__.py +++ b/python/npcomp/__init__.py @@ -2,5 +2,4 @@ # See https://llvm.org/LICENSE.txt for license information. 
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -from .builtin_ops import * -from .value_coder import * +from ._mlir_libs._npcomp import register_all_dialects diff --git a/python/npcomp/compiler/generic/backend/iree.py b/python/npcomp/compiler/generic/backend/iree.py deleted file mode 100644 index 2154ca647..000000000 --- a/python/npcomp/compiler/generic/backend/iree.py +++ /dev/null @@ -1,16 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import os - - -def get_translate_exe(): - search_names = ["iree-translate", "iree-translate.exe"] - resources_dir = os.path.join(os.path.dirname(__file__)) - for search_name in search_names: - exe = os.path.join(resources_dir, search_name) - if os.path.exists(exe): - return exe - raise RuntimeError(f"Could not find iree-translate at path: {resources_dir} " - f"(is it installed?)") diff --git a/python/npcomp/compiler/generic/backend/refjit.py b/python/npcomp/compiler/generic/backend/refjit.py index ad6623800..749851397 100644 --- a/python/npcomp/compiler/generic/backend/refjit.py +++ b/python/npcomp/compiler/generic/backend/refjit.py @@ -7,11 +7,6 @@ import platform _refjit = None -BACKEND_PASSES = ( - "builtin.func(convert-scf-to-std)", - "builtin.func(canonicalize)", -) - def get_refjit(): """Dynamically resolves the refjit backend native module.""" diff --git a/python/npcomp/compiler/numpy/backend/__init__.py b/python/npcomp/compiler/numpy/backend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python/npcomp/compiler/numpy/backend/iree.py b/python/npcomp/compiler/numpy/backend/iree.py deleted file mode 100644 index 467c65d64..000000000 --- a/python/npcomp/compiler/numpy/backend/iree.py +++ /dev/null @@ -1,109 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import io -import subprocess - -from npcomp.ir import * -from npcomp.passmanager import * -from npcomp.compiler.generic.backend import iree as iree_backend -from npcomp.compiler.utils import logging - -__all__ = [ - "is_enabled", - "CompilerBackend", -] - -FRONTEND_PASSES = ( - "builtin.func(basicpy-type-inference)", - "builtin.func(convert-basicpy-to-std)", - "builtin.func(canonicalize)", - "builtin.func(convert-scf-to-std)", -) - -_ireert = None -_cached_config = None - - -def _get_iree(): - """Dynamically resolves the iree backend module.""" - global _ireert - try: - from pyiree import rt as imported_rt - except ImportError: - raise ImportError("IREE runtime library not found (pyiree.rt)") - _ireert = imported_rt - return _ireert - - -def is_enabled() -> bool: - """Returns whether the backend is enabled for the current build.""" - try: - _get_iree() - return True - except ImportError: - return False - - -class CompilerBackend: - """Main entry-point for the backend.""" - - def __init__(self): - super().__init__() - self._ireert = _get_iree() - self._debug = logging.debug_enabled() - - def compile(self, imported_module: Module): - """Compiles an imported module. - - Args: - imported_ir_module: The MLIR module as imported from the ImportFrontend. - Returns: - An opaque, backend specific module object that can be passed to load. - The object may actually be something more specific to the backend (i.e. 
- for IREE, it is a serialized VM flatbuffer) but the contract is that - it is operated on by methods on this class. - """ - with imported_module.context: - # Frontend. - if self._debug: - logging.debug("Input IR:\n{}", imported_module) - assert ( - imported_module.operation.verify()), "Imported module does not verify" - # Frontend. - pm = PassManager.parse(",".join(FRONTEND_PASSES)) - pm.run(imported_module) - if self._debug: - logging.debug("Frontend IR:{}", imported_module) - - # TODO: There should be some common utility for invoking backend processes - # safely (and have options like saving temps, etc). - args = [ - iree_backend.get_translate_exe(), "--iree-mlir-to-vm-bytecode-module" - ] - p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE) - imported_module.operation.print(binary=True, - enable_debug_info=True, - file=p.stdin) - out, err = p.communicate() - return out - - def load(self, vm_blob): - """Loads a compiled artifact into the runtime. - - This is meant as a simple mechanism for testing and is not optimized or - highly parameterized. It loads a compiled result into a new runtime - instance and returns an object that exposes a python function for each - public function compiled in the imported_ir_module that was compiled. - """ - ireert = self._ireert - m = ireert.VmModule.from_flatbuffer(vm_blob) - global _cached_config - if not _cached_config: - # TODO: Need to make the configuration more flexible. - _cached_config = ireert.Config(driver_name="vmla") - ctx = ireert.SystemContext(config=_cached_config) - ctx.add_module(m) - # TODO: The implicit tying of the 'module' name has got to go. - return ctx.modules.module diff --git a/python/npcomp/compiler/numpy/backend/refjit.py b/python/npcomp/compiler/numpy/backend/refjit.py deleted file mode 100644 index 3f9948817..000000000 --- a/python/npcomp/compiler/numpy/backend/refjit.py +++ /dev/null @@ -1,76 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import os - -from npcomp.ir import * -from npcomp.passmanager import * -from npcomp.compiler.generic.backend import refjit as refjit_backend -from npcomp.compiler.utils import logging - -__all__ = [ - "is_enabled", - "CompilerBackend", -] - -FRONTEND_PASSES = ( - "builtin.func(npcomp-cpa-type-inference)", - "numpy-public-functions-to-tensor", - "builtin.func(convert-scf-to-std)", - "builtin.func(canonicalize)", -) - -# Re-export. -is_enabled = refjit_backend.is_enabled - - -class CompilerBackend: - """Main entry-point for the backend.""" - - def __init__(self): - super().__init__() - self._refjit = refjit_backend.get_refjit() - self._debug = logging.debug_enabled() - - def compile(self, imported_module: Module): - """Compiles an imported module. - - Args: - legacy_imported_ir_module: The MLIR module as imported from the - ImportFrontend. - Returns: - An opaque, backend specific module object that can be passed to load. - The object may actually be something more specific to the backend (i.e. - for IREE, it is a serialized VM flatbuffer) but the contract is that - it is operated on by methods on this class. - """ - with imported_module.context as context: - # Frontend. 
- if self._debug: - logging.debug("Input IR:\n{}", imported_module) - assert ( - imported_module.operation.verify()), "Imported module does not verify" - pm = PassManager.parse(",".join(FRONTEND_PASSES)) - pm.run(imported_module) - if self._debug: - logging.debug("Frontend IR:\n{}", imported_module) - - # Backend. - # Note that this is a separate pass manager purely to aid in debugging. - pm = PassManager() - self._refjit.build_backend_compilation_pipeline(pm) - pm.run(imported_module) - if self._debug: - logging.debug("Backend IR:\n{}", imported_module) - - jit_module = self._refjit.JITModule.from_compiled_module( - imported_module, refjit_backend.get_runtime_libs()) - return jit_module - - def load(self, jit_module): - """Loads a compiled artifact into the runtime. - - Since this is a JIT instead of an AOT compiler, - """ - return refjit_backend.JitModuleInvoker(jit_module) diff --git a/python/npcomp/compiler/numpy/extensions/numpy/builtin_ops.py b/python/npcomp/compiler/numpy/extensions/numpy/builtin_ops.py deleted file mode 100644 index a8658be01..000000000 --- a/python/npcomp/compiler/numpy/extensions/numpy/builtin_ops.py +++ /dev/null @@ -1,109 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Configures evaluation support for numpy builtin ops.""" - -from typing import Callable, Iterator, Sequence, Tuple - -import functools -import numpy as np - -from ...interfaces import * -from ...partial_eval_base import * - -from ....utils import logging - -from ..... import ir as _ir -from ....._mlir_libs import _npcomp as _cext -from .....dialects import numpy as numpy_ops - -__all__ = [ - "get_ufuncs_from_module", - "bind_ufuncs", -] - -################################################################################ -# Ufunc evaluation -################################################################################ - - -def _default_ufunc_predicate(ufunc: np.ufunc) -> bool: - """Filters ufuncs based on ability to evaluate them.""" - # Support up to 2 input, 1 output functions. - if ufunc.nin > 2 or ufunc.nout != 1: - return False - return True - - -def get_ufuncs_from_module( - *, - module=np, - prefix: str = "numpy.", - predicate: Callable[[np.ufunc], bool] = _default_ufunc_predicate, -) -> Iterator[Tuple[str, np.ufunc]]: - """Iterates over all ufuncs in a module. - - Yields: - Tuple of (prefixed_name, ufunc). 
- """ - ufunc_class = np.ufunc - for local_name in dir(module): - value = getattr(module, local_name) - if isinstance(value, ufunc_class): - if not predicate(value): - logging.debug("Skipped ufunc: {}{} ({})", prefix, local_name, value) - else: - yield (prefix + local_name), value - - -def bind_ufuncs(ufuncs: Iterator[Tuple[str, np.ufunc]], - pe_hook: MappedPartialEvalHook): - """Binds a set of ufuncs to a partial eval hook.""" - for qualified_name, ufunc in ufuncs: - pe_hook.bind_action(functools.partial(BuiltinUfuncLiveValueRef, - qualified_name, ufunc), - for_ref=ufunc) - - -class BuiltinUfuncLiveValueRef(LiveValueRef): - """A partial evaluation that emits IR for invoking a ufunc.""" - __slots__ = ["_qualified_name", "_ufunc"] - - def __init__(self, qualified_name: str, ufunc: np.ufunc, live_value): - super().__init__(live_value) - self._qualified_name = qualified_name - self._ufunc = ufunc - - def resolve_call(self, env: Environment, args: Sequence[_ir.Value], - keywords: Sequence[str]) -> PartialEvalResult: - if keywords: - return PartialEvalResult.error_message( - "ufunc call does not currently support keyword args") - if len(args) != self._ufunc.nin: - return PartialEvalResult.error_message( - "ufunc {} expected {} inputs but got {}".format( - self._qualified_name, self._ufunc.nin, len(args))) - ic = env.ic - - # Because a ufunc call is defined in terms of tensors and, at this stage, - # all "public" values are ndarray, do appropriate conversions. - def copy_to_tensor(value): - tensor_type = _cext.ndarray_to_tensor_type(value.type) - return numpy_ops.CopyToTensorOp(tensor_type, value, loc=ic.loc, - ip=ic.ip).result - - tensor_args = [copy_to_tensor(arg) for arg in args] - result_type = ic.unknown_tensor_type - tensor_result = numpy_ops.BuiltinUfuncCallOp(result_type, - _ir.StringAttr.get( - self._qualified_name, - context=ic.context), - tensor_args, - loc=ic.loc, - ip=ic.ip).result - array_result = numpy_ops.CreateArrayFromTensorOp( - _cext.shaped_to_ndarray_type(tensor_result.type), - tensor_result, - loc=ic.loc, - ip=ic.ip).result - return PartialEvalResult.yields_ir_value(array_result) diff --git a/python/npcomp/compiler/numpy/extensions/numpy/value_coder.py b/python/npcomp/compiler/numpy/extensions/numpy/value_coder.py deleted file mode 100644 index 9858efbf7..000000000 --- a/python/npcomp/compiler/numpy/extensions/numpy/value_coder.py +++ /dev/null @@ -1,49 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Value coders for Numpy types.""" - -import numpy as np -from typing import Union - -from ...interfaces import * - -from ....utils import logging - -from ..... 
import ir as _ir -from .....dialects import std as std_ops, numpy as numpy_ops -from ....._mlir_libs import _npcomp as _cext - -__all__ = [ - "CreateNumpyValueCoder", -] - -_NotImplementedType = type(NotImplemented) - - -class NdArrayValueCoder(ValueCoder): - """Value coder for numpy types.""" - __slots__ = [] - - def code_py_value_as_const(self, env: Environment, - py_value) -> Union[_NotImplementedType, _ir.Value]: - # TODO: Query for ndarray compat (for duck typed and such) - # TODO: Have a higher level name resolution signal which indicates const - ic = env.ic - if isinstance(py_value, np.ndarray): - dense_attr = _ir.DenseElementsAttr.get(py_value, context=ic.context) - tensor_type = dense_attr.type - tensor_value = std_ops.ConstantOp(tensor_type, - dense_attr, - loc=ic.loc, - ip=ic.ip).result - ndarray_type = _cext.shaped_to_ndarray_type(tensor_type) - return numpy_ops.CreateArrayFromTensorOp(ndarray_type, - tensor_value, - loc=ic.loc, - ip=ic.ip).result - return NotImplemented - - -def CreateNumpyValueCoder() -> ValueCoder: - return ValueCoderChain((NdArrayValueCoder(),)) diff --git a/python/npcomp/compiler/numpy/frontend.py b/python/npcomp/compiler/numpy/frontend.py deleted file mode 100644 index 6544705cb..000000000 --- a/python/npcomp/compiler/numpy/frontend.py +++ /dev/null @@ -1,159 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -""" -Frontend to the compiler, allowing various ways to import code. -""" - -import ast -import inspect -import textwrap -from typing import Optional - -from ..utils import logging -from .importer import * -from .interfaces import * -from .name_resolver_base import * -from .value_coder_base import * -from .target import * - -from ..utils.mlir_utils import * - -from ... import ir as _ir - -__all__ = [ - "ImportFrontend", -] - - -class ImportFrontend: - """Frontend for importing various entities into a Module.""" - __slots__ = [ - "_ir_module", - "_config", - "_ic", - ] - - def __init__(self, - *, - config: Configuration, - ir_context: Optional[_ir.Context] = None): - super().__init__() - ic = self._ic = ImportContext(ir_context) - self._ic.module = _ir.Module.create(loc=ic.loc) - self._config = config - - @property - def ir_context(self) -> _ir.Context: - return self._ic.context - - @property - def ir_module(self) -> _ir.Module: - return self._ic.module - - def import_global_function(self, f): - """Imports a global function. - - This facility is not general and does not allow customization of the - containing environment, method import, etc. - - Most errors are emitted via the MLIR context's diagnostic infrastructure, - but errors related to extracting source, etc are raised directly. - - Args: - f: The python callable. - """ - ic = self._ic - target = self._config.target_factory(ic) - filename = inspect.getsourcefile(f) - source_lines, start_lineno = inspect.getsourcelines(f) - source = "".join(source_lines) - source = textwrap.dedent(source) - ast_root = ast.parse(source, filename=filename) - ast.increment_lineno(ast_root, start_lineno - 1) - ast_fd = ast_root.body[0] - - # Define the function. - # TODO: Much more needs to be done here (arg/result mapping, etc) - logging.debug(":::::::") - logging.debug("::: Importing global function {}:\n{}", ast_fd.name, - ast.dump(ast_fd, include_attributes=True)) - - # TODO: VERY BAD: Assumes all positional params. 
- f_signature = inspect.signature(f) - f_params = f_signature.parameters - f_input_types = [ - self._resolve_signature_annotation(target, p.annotation) - for p in f_params.values() - ] - f_return_type = self._resolve_signature_annotation( - target, f_signature.return_annotation) - ir_f_type = _ir.FunctionType.get(f_input_types, [f_return_type], - context=ic.context) - - ic.set_file_line_col(filename, ast_fd.lineno, ast_fd.col_offset) - ic.insert_end_of_block(ic.module.body) - ir_f, entry_block = ic.FuncOp(ast_fd.name, - ir_f_type, - create_entry_block=True) - ic.insert_end_of_block(entry_block) - env = self._create_const_global_env(f, - parameter_bindings=zip( - f_params.keys(), - entry_block.arguments), - target=target) - fctx = FunctionContext(ic=ic, ir_f=ir_f, filename=filename, environment=env) - - fdimport = FunctionDefImporter(fctx, ast_fd) - fdimport.import_body() - return ir_f - - def _create_const_global_env(self, f, parameter_bindings, target): - """Helper to generate an environment for a global function. - - This is a helper for the very common case and will be wholly insufficient - for advanced cases, including mutable global state, closures, etc. - Globals from the module are considered immutable. - """ - ic = self._ic - try: - code = f.__code__ - globals_dict = f.__globals__ - builtins_module = globals_dict["__builtins__"] - except AttributeError: - assert False, ( - "Function {} does not have required user-defined function attributes". - format(f)) - - # Locals resolver. - # Note that co_varnames should include both parameter and local names. - locals_resolver = LocalNameResolver(code.co_varnames) - resolvers = ( - locals_resolver, - ConstModuleNameResolver(globals_dict, as_dict=True), - ConstModuleNameResolver(builtins_module), - ) - env = Environment(config=self._config, ic=ic, name_resolvers=resolvers) - - # Bind parameters. - for name, value in parameter_bindings: - logging.debug("STORE PARAM: {} <- {}", name, value) - locals_resolver.checked_resolve_name(name).store(env, value) - return env - - def _resolve_signature_annotation(self, target: Target, annot): - ic = self._ic - if annot is inspect.Signature.empty: - return ic.unknown_type - - # TODO: Do something real here once we need more than the primitive types. - if annot is int: - return target.impl_int_type - elif annot is float: - return target.impl_float_type - elif annot is bool: - return ic.bool_type - elif annot is str: - return ic.str_type - else: - return ic.unknown_type diff --git a/python/npcomp/compiler/numpy/importer.py b/python/npcomp/compiler/numpy/importer.py deleted file mode 100644 index 3e0fa98cb..000000000 --- a/python/npcomp/compiler/numpy/importer.py +++ /dev/null @@ -1,519 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -""" -Importers for populating MLIR from AST. -""" -import ast -import sys -import traceback - -from .interfaces import * -from ..utils import logging - -from ... 
import ir as _ir -from ...dialects import std as std_ops, basicpy as basicpy_ops -from ..._mlir_libs import _npcomp as _cext - -__all__ = [ - "FunctionContext", - "FunctionDefImporter", - "ExpressionImporter", -] - - -class FunctionContext: - """Accounting information for importing a function.""" - __slots__ = [ - "ic", - "ir_f", - "filename", - "environment", - ] - - def __init__(self, *, ic: ImportContext, ir_f: _ir.Operation, filename: str, - environment: Environment): - self.ic = ic - self.ir_f = ir_f - self.filename = filename - self.environment = environment - - def abort(self, message): - """Emits an error diagnostic and raises an exception to abort.""" - loc = self.current_loc - _cext.emit_error(loc, message) - raise EmittedError(loc, message) - - def check_partial_evaluated(self, result: PartialEvalResult): - """Checks that a PartialEvalResult has evaluated without error.""" - if result.type == PartialEvalType.ERROR: - exc_type, exc_value, tb = result.yields - loc = self.current_loc - if issubclass(exc_type, UserReportableError): - message = exc_value.message - else: - message = ("Error while evaluating value from environment:\n" + - "".join(traceback.format_exception(exc_type, exc_value, tb))) - - # TODO: Add this to the python API. - _cext.emit_error(loc, message) - raise EmittedError(loc, message) - if result.type == PartialEvalType.NOT_EVALUATED: - self.abort("Unable to evaluate expression") - - @property - def current_loc(self): - return self.ic.loc - - def update_loc(self, ast_node): - self.ic.set_file_line_col(self.filename, ast_node.lineno, - ast_node.col_offset) - - def lookup_name(self, name) -> NameReference: - """Lookup a name in the environment, requiring it to have evaluated.""" - ref = self.environment.resolve_name(name) - if ref is None: - self.abort("Could not resolve referenced name '{}'".format(name)) - logging.debug("Map name({}) -> {}", name, ref) - return ref - - def emit_const_value(self, py_value) -> _ir.Value: - """Codes a value as a constant, returning an ir Value.""" - env = self.environment - result = env.code_py_value_as_const(py_value) - if result is NotImplemented: - self.abort("Cannot code python value as constant: {}".format(py_value)) - return result - - def emit_partial_eval_result(self, - partial_result: PartialEvalResult) -> _ir.Value: - """Emits a partial eval result either as a direct IR value or a constant.""" - self.check_partial_evaluated(partial_result) - if partial_result.type == PartialEvalType.YIELDS_IR_VALUE: - # Return directly. - return partial_result.yields - elif partial_result.type == PartialEvalType.YIELDS_LIVE_VALUE: - # Import constant. - return self.emit_const_value(partial_result.yields.live_value) - else: - self.abort("Unhandled partial eval result type {}".format(partial_result)) - - -class BaseNodeVisitor(ast.NodeVisitor): - """Base class of a node visitor that aborts on unhandled nodes.""" - IMPORTER_TYPE = "" - __slots__ = [ - "fctx", - ] - - def __init__(self, fctx): - super().__init__() - self.fctx = fctx - - def visit(self, node): - self.fctx.update_loc(node) - return super().visit(node) - - def generic_visit(self, ast_node): - logging.debug("UNHANDLED NODE: {}", ast.dump(ast_node)) - self.fctx.abort("unhandled python %s AST node '%s'" % - (self.IMPORTER_TYPE, ast_node.__class__.__name__)) - - -class FunctionDefImporter(BaseNodeVisitor): - """AST visitor for importing a function's statements. - - Handles nodes that are direct children of a FunctionDef. 
- """ - IMPORTER_TYPE = "statement" - __slots__ = [ - "ast_fd", - "_last_was_return", - ] - - def __init__(self, fctx, ast_fd): - super().__init__(fctx) - self.ast_fd = ast_fd - self._last_was_return = False - - def import_body(self): - ic = self.fctx.ic - for ast_stmt in self.ast_fd.body: - self._last_was_return = False - logging.debug("STMT: {}", ast.dump(ast_stmt, include_attributes=True)) - self.visit(ast_stmt) - if not self._last_was_return: - # Add a default terminator. - none_value = basicpy_ops.SingletonOp(ic.none_type, loc=ic.loc, - ip=ic.ip).result - none_cast = basicpy_ops.UnknownCastOp(ic.unknown_type, - none_value, - loc=ic.loc, - ip=ic.ip).result - std_ops.ReturnOp([none_cast], loc=ic.loc, ip=ic.ip) - - def visit_Assign(self, ast_node): - expr = ExpressionImporter(self.fctx) - expr.visit(ast_node.value) - for target in ast_node.targets: - self.fctx.update_loc(target) - if not isinstance(target.ctx, ast.Store): - # TODO: Del, AugStore, etc - self.fctx.abort("Unsupported assignment context type %s" % - target.ctx.__class__.__name__) - name_ref = self.fctx.lookup_name(target.id) - try: - name_ref.store(self.fctx.environment, expr.value) - logging.debug("STORE: {} <- {}", name_ref, expr.value) - except NotImplementedError: - self.fctx.abort( - "Cannot assign to '{}': Store not supported".format(name_ref)) - - def visit_Expr(self, ast_node): - ic = self.fctx.ic - exec_ip = ic.basicpy_ExecOp() - - # Evaluate the expression in the exec body. - ic.push_ip(exec_ip) - expr = ExpressionImporter(self.fctx) - expr.visit(ast_node.value) - basicpy_ops.ExecDiscardOp([expr.value], loc=ic.loc, ip=ic.ip) - ic.pop_ip() - - def visit_Pass(self, ast_node): - pass - - def visit_Return(self, ast_node): - ic = self.fctx.ic - with ic.loc, ic.ip: - expr = ExpressionImporter(self.fctx) - expr.visit(ast_node.value) - casted = basicpy_ops.UnknownCastOp(ic.unknown_type, expr.value).result - std_ops.ReturnOp([casted]) - self._last_was_return = True - - -class ExpressionImporter(BaseNodeVisitor): - """Imports expression nodes. - - Visitor methods should either raise an exception or set self.value to the - IR value that the expression lowers to. - """ - IMPORTER_TYPE = "expression" - __slots__ = [ - "value", - ] - - def __init__(self, fctx): - super().__init__(fctx) - self.value = None - - def visit(self, node): - super().visit(node) - assert self.value, ("ExpressionImporter did not assign a value (%r)" % - (ast.dump(node),)) - - def sub_evaluate(self, sub_node): - sub_importer = ExpressionImporter(self.fctx) - sub_importer.visit(sub_node) - return sub_importer.value - - def emit_constant(self, value): - env = self.fctx.environment - ir_const_value = env.code_py_value_as_const(value) - if ir_const_value is NotImplemented: - self.fctx.abort("unknown constant type '%r'" % (value,)) - self.value = ir_const_value - - def visit_Attribute(self, ast_node): - # Import the attribute's value recursively as a partial eval if possible. 
- pe_importer = PartialEvalImporter(self.fctx) - pe_importer.visit(ast_node) - if pe_importer.partial_eval_result: - self.fctx.check_partial_evaluated(pe_importer.partial_eval_result) - self.value = self.fctx.emit_partial_eval_result( - pe_importer.partial_eval_result) - return - - self.fctx.abort("unhandled attribute access mode: {}".format( - ast.dump(ast_node))) - - def visit_BinOp(self, ast_node): - ic = self.fctx.ic - left = self.sub_evaluate(ast_node.left) - right = self.sub_evaluate(ast_node.right) - self.value = basicpy_ops.BinaryExprOp(ic.unknown_type, - left, - right, - _ir.StringAttr.get( - ast_node.op.__class__.__name__, - context=ic.context), - ip=ic.ip, - loc=ic.loc).result - - def visit_BoolOp(self, ast_node): - ic = self.fctx.ic - if isinstance(ast_node.op, ast.And): - return_first_true = False - elif isinstance(ast_node.op, ast.Or): - return_first_true = True - else: - self.fctx.abort("unknown bool op %r" % (ast.dump(ast_node.op))) - - def emit_next(next_nodes): - next_node = next_nodes[0] - next_nodes = next_nodes[1:] - next_value = self.sub_evaluate(next_node) - if not next_nodes: - return next_value - condition_value = basicpy_ops.AsI1Op(ic.i1_type, next_value, - ip=ic.ip).result - if_op, then_ip, else_ip = ic.scf_IfOp([ic.unknown_type], condition_value, - True) - # Short-circuit return case. - ic.push_ip(then_ip if return_first_true else else_ip) - next_value_casted = basicpy_ops.UnknownCastOp(ic.unknown_type, - next_value, - ip=ic.ip).result - ic.scf_YieldOp([next_value_casted]) - ic.pop_ip() - - # Nested evaluate next case. - ic.push_ip(else_ip if return_first_true else then_ip) - nested_value = emit_next(next_nodes) - nested_value_casted = next_value_casted = basicpy_ops.UnknownCastOp( - ic.unknown_type, nested_value, ip=ic.ip).result - ic.scf_YieldOp([nested_value_casted]) - ic.pop_ip() - - return if_op.result - - with ic.loc: - self.value = emit_next(ast_node.values) - - def visit_Call(self, ast_node): - # Evaluate positional args. - evaluated_args = [] - for raw_arg in ast_node.args: - evaluated_args.append(self.sub_evaluate(raw_arg)) - - # Evaluate keyword args. - keyword_args = [] - for raw_kw_arg in ast_node.keywords: - keyword_args.append((raw_kw_arg.arg, self.sub_evaluate(raw_kw_arg.value))) - - # Perform partial evaluation of the callee. - callee_importer = PartialEvalImporter(self.fctx) - callee_importer.visit(ast_node.func) - callee_result = callee_importer.partial_eval_result - if (callee_result and - callee_result.type == PartialEvalType.YIELDS_LIVE_VALUE): - # This is a function known to the compiler. Perform a template call. - call_result = callee_result.yields.resolve_call(self.fctx.environment, - evaluated_args, - keyword_args) - if call_result.type != PartialEvalType.NOT_EVALUATED: - # Partial evaluation success. - self.fctx.check_partial_evaluated(call_result) - self.value = self.fctx.emit_partial_eval_result(call_result) - return - - # The function is not known to the compiler. - self.fctx.check_partial_evaluated(callee_result) - # TODO: Implement first class functions. - self.fctx.abort("unhandled (potentially first-class function): {}".format( - ast.dump(ast_node))) - - def visit_Compare(self, ast_node): - # Short-circuit comparison (degenerates to binary comparison when just - # two operands). 
- ic = self.fctx.ic - false_value = basicpy_ops.BoolConstantOp(ic.bool_type, - ic.i1_false, - ip=ic.ip, - loc=ic.loc).result - - def emit_next(left_value, comparisons): - operation, right_node = comparisons[0] - comparisons = comparisons[1:] - right_value = self.sub_evaluate(right_node) - compare_result = basicpy_ops.BinaryCompareOp( - ic.bool_type, - left_value, - right_value, - _ir.StringAttr.get(operation.__class__.__name__), - ip=ic.ip, - loc=ic.loc).result - # Terminate by yielding the final compare result. - if not comparisons: - return compare_result - - # Emit 'if' op and recurse. The if op takes an i1 (core dialect - # requirement) and returns a basicpy.BoolType. Since this is an 'and', - # all else clauses yield a false value. - compare_result_i1 = basicpy_ops.BoolCastOp(ic.i1_type, - compare_result, - ip=ic.ip, - loc=ic.loc).result - if_op, then_ip, else_ip = ic.scf_IfOp([ic.bool_type], compare_result_i1, - True) - # Build the else clause. - ic.push_ip(else_ip) - ic.scf_YieldOp([false_value]) - ic.pop_ip() - - # Build the then clause. - ic.push_ip(then_ip) - nested_result = emit_next(right_value, comparisons) - ic.scf_YieldOp([nested_result]) - ic.pop_ip() - - return if_op.result - - self.value = emit_next(self.sub_evaluate(ast_node.left), - list(zip(ast_node.ops, ast_node.comparators))) - - def visit_IfExp(self, ast_node): - ic = self.fctx.ic - test_result = basicpy_ops.AsI1Op(ic.i1_type, - self.sub_evaluate(ast_node.test), - ip=ic.ip, - loc=ic.loc).result - if_op, then_ip, else_ip = ic.scf_IfOp([ic.unknown_type], test_result, True) - # Build the then clause - ic.push_ip(then_ip) - then_result = self.sub_evaluate(ast_node.body) - ic.scf_YieldOp([ - basicpy_ops.UnknownCastOp(ic.unknown_type, - then_result, - ip=ic.ip, - loc=ic.loc).result - ]) - ic.pop_ip() - - # Build the then clause. - ic.push_ip(else_ip) - orelse_result = self.sub_evaluate(ast_node.orelse) - ic.scf_YieldOp([ - basicpy_ops.UnknownCastOp(ic.unknown_type, - orelse_result, - ip=ic.ip, - loc=ic.loc).result - ]) - ic.pop_ip() - self.value = if_op.result - - def visit_Name(self, ast_node): - if not isinstance(ast_node.ctx, ast.Load): - self.fctx.abort("Unsupported expression name context type %s" % - ast_node.ctx.__class__.__name__) - name_ref = self.fctx.lookup_name(ast_node.id) - pe_result = name_ref.load(self.fctx.environment) - logging.debug("LOAD {} -> {}", name_ref, pe_result) - self.value = self.fctx.emit_partial_eval_result(pe_result) - - def visit_UnaryOp(self, ast_node): - ic = self.fctx.ic - with ic.ip, ic.loc: - op = ast_node.op - operand_value = self.sub_evaluate(ast_node.operand) - if isinstance(op, ast.Not): - # Special handling for logical-not. - condition_value = basicpy_ops.AsI1Op(ic.i1_type, operand_value).result - true_value = basicpy_ops.BoolConstantOp(ic.bool_type, ic.i1_true).result - false_value = basicpy_ops.BoolConstantOp(ic.bool_type, - ic.i1_false).result - self.value = std_ops.SelectOp(ic.bool_type, condition_value, - false_value, true_value).result - else: - self.fctx.abort("Unknown unary op %r", (ast.dump(op))) - - if sys.version_info < (3, 8, 0): - # <3.8 breaks these out into separate AST classes. - def visit_Num(self, ast_node): - self.emit_constant(ast_node.n) - - def visit_Str(self, ast_node): - self.emit_constant(ast_node.s) - - def visit_Bytes(self, ast_node): - self.emit_constant(ast_node.s) - - def visit_NameConstant(self, ast_node): - self.emit_constant(ast_node.value) - - def visit_Ellipsis(self, ast_node): - self.emit_constant(...) 
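To make the visitor dispatch concrete, here is a small illustrative input function (editor-added; not taken from the deleted sources) annotated with the handlers above that each construct would reach:

```python
# Illustration only: a tiny function whose AST exercises the statement and
# expression visitors above (Assign, BinOp, Compare, IfExp, Return).
def example(a, b):
    c = a + b                     # visit_Assign wrapping visit_BinOp
    in_range = a < c < b * 2      # visit_Compare: chained, short-circuit compare
    return c if in_range else b   # visit_IfExp feeding visit_Return
```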
- else: - - def visit_Constant(self, ast_node): - self.emit_constant(ast_node.value) - - -class PartialEvalImporter(BaseNodeVisitor): - """Importer for performing greedy partial evaluation. - - Concretely this is used for Attribute.value and Call resolution. - - Attribute resolution is not just treated as a normal expression because it - is first subject to "partial evaluation", allowing the environment's partial - eval hook to operate on live python values from the containing - environment versus naively emitting code for attribute resolution for - entities that can/should be considered constants from the hosting context. - This is used, for example, to resolve attributes from modules without - immediately dereferencing/transforming the intervening chain of attributes. - """ - IMPORTER_TYPE = "partial_eval" - __slots__ = [ - "partial_eval_result", - ] - - def __init__(self, fctx): - super().__init__(fctx) - self.partial_eval_result = None - - def visit_Attribute(self, ast_node): - # Sub-evaluate the 'value'. - sub_eval = PartialEvalImporter(self.fctx) - sub_eval.visit(ast_node.value) - - if sub_eval.partial_eval_result: - # Partial sub-evaluation successful. - sub_result = sub_eval.partial_eval_result - else: - # Need to evaluate it as an expression. - sub_expr = ExpressionImporter(self.fctx) - sub_expr.visit(ast_node.value) - assert sub_expr.value, ( - "Evaluated sub expression did not return a value: %r" % - (ast_node.value)) - sub_result = PartialEvalResult.yields_ir_value(sub_expr.value) - - # Attempt to perform a static getattr as a partial eval if still operating - # on a live value. - self.fctx.check_partial_evaluated(sub_result) - if sub_result.type == PartialEvalType.YIELDS_LIVE_VALUE: - logging.debug("STATIC getattr '{}' on {}", ast_node.attr, sub_result) - getattr_result = sub_result.yields.resolve_getattr( - self.fctx.environment, ast_node.attr) - if getattr_result.type != PartialEvalType.NOT_EVALUATED: - self.fctx.check_partial_evaluated(getattr_result) - self.partial_eval_result = getattr_result - return - # If a non-statically evaluable live value, then convert to a constant - # and dynamic dispatch. - ir_value = self.fctx.emit_const_value(sub_result.yields.live_value) - else: - ir_value = sub_result.yields - - # Yielding an IR value from a recursive partial evaluation means that the - # entire chain needs to be hoisted to IR. - # TODO: Implement. - self.fctx.abort("dynamic-emitted getattr not yet supported: %r" % - (ir_value,)) - - def visit_Name(self, ast_node): - name_ref = self.fctx.lookup_name(ast_node.id) - partial_eval_result = name_ref.load(self.fctx.environment) - logging.debug("PARTIAL EVAL {} -> {}", name_ref, partial_eval_result) - self.partial_eval_result = partial_eval_result diff --git a/python/npcomp/compiler/numpy/interfaces.py b/python/npcomp/compiler/numpy/interfaces.py deleted file mode 100644 index b91948b40..000000000 --- a/python/npcomp/compiler/numpy/interfaces.py +++ /dev/null @@ -1,342 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Base classes and interfaces.""" - -from collections import namedtuple -from enum import Enum -import sys -from typing import List, Optional, Sequence, Tuple, Union - -from .target import * -from ..utils.mlir_utils import * -from ... 
import ir as _ir - -__all__ = [ - "Configuration", - "EmittedError", - "Environment", - "ImportContext", - "NameReference", - "NameResolver", - "PartialEvalHook", - "PartialEvalType", - "PartialEvalResult", - "LiveValueRef", - "UserReportableError", - "ValueCoder", - "ValueCoderChain", -] - -_NotImplementedType = type(NotImplemented) - -################################################################################ -# Exceptions -################################################################################ - - -class EmittedError(Exception): - """Exception subclass that indicates an error diagnostic has been emitted. - - By throwing, this lets us abort and handle at a higher level so as not - to duplicate diagnostics. - """ - - def __init__(self, loc, message): - super().__init__(loc, message) - - @property - def loc(self): - return self.args[0] - - @property - def message(self): - return self.args[1] - - -class UserReportableError(Exception): - """Used to raise an error with a message that should be reported to the user. - - Raising this error indicates that the error message is well formed and - makes sense without a traceback. - """ - - def __init__(self, message): - super().__init__(message) - - @property - def message(self): - return self.args[0] - - -################################################################################ -# Name resolution -################################################################################ - - -class NameReference: - """Abstract base class for performing operations on a name.""" - __slots__ = [ - "name", - ] - - def __init__(self, name): - super().__init__() - self.name = name - - def load(self, env: "Environment") -> "PartialEvalResult": - """Loads the IR Value associated with the name. - - The load may either be direct, returning an existing value or - side-effecting, causing a read from an external context. - - Returns: - A partial evaluation result. - """ - return PartialEvalResult.not_evaluated() - - def store(self, env: "Environment", value: _ir.Value): - """Stores a new value into the name. - - A subsequent call to 'load' should yield the same value, subject to - typing constraints on value equality. - - Args: - value: The new value to store into the name. - Raises: - NotImplementedError if store is not supported for this name. - """ - raise NotImplementedError() - - -class NameResolver: - """Abstract base class that can resolve a name. - - Name resolvers are typically stacked. - """ - __slots__ = [] - - def checked_resolve_name(self, name: str) -> Optional[NameReference]: - ref = self.resolve_name(name) - assert ref is not None, "Lookup of name {} is required".format(name) - return ref - - def resolve_name(self, name: str) -> Optional[NameReference]: - return None - - -################################################################################ -# Value coding -# Transforms python values into IR values. -################################################################################ - - -class ValueCoder: - """Encodes values in various ways. - - Instances are designed to be daisy-chained and should ignore types that they - don't understand. Functions return NotImplemented if they cannot handle a - case locally. 
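The coder protocol described here is a chain of responsibility: each coder either claims a value or returns NotImplemented so the next one can try. A minimal standalone sketch of that protocol (illustrative names only, independent of the deleted classes):

```python
# Standalone sketch of the coder-chain protocol described above: each coder
# returns NotImplemented for values it does not understand, and the chain
# returns the first concrete result.
class IntCoder:
    def code(self, value):
        return ("int-const", value) if isinstance(value, int) else NotImplemented

class StrCoder:
    def code(self, value):
        return ("str-const", value) if isinstance(value, str) else NotImplemented

class Chain:
    def __init__(self, coders):
        self._coders = tuple(coders)

    def code(self, value):
        for coder in self._coders:
            result = coder.code(value)
            if result is not NotImplemented:
                return result
        return NotImplemented

chain = Chain([IntCoder(), StrCoder()])
assert chain.code(7) == ("int-const", 7)
assert chain.code(3.5) is NotImplemented  # no coder claims floats
```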
- """ - __slots__ = [] - - def code_py_value_as_const(self, env: "Environment", - py_value) -> Union[_NotImplementedType, _ir.Value]: - return NotImplemented - - -class ValueCoderChain(ValueCoder): - """Codes values by delegating to sub-coders in order.""" - __slots__ = ["_sub_coders"] - - def __init__(self, sub_coders: Sequence[ValueCoder]): - self._sub_coders = tuple(sub_coders) - - def __repr__(self): - return "ValueCoderChain({})".format(self._sub_coders) - - def code_py_value_as_const(self, env: "Environment", - py_value) -> Union[_NotImplementedType, _ir.Value]: - for sc in self._sub_coders: - result = sc.code_py_value_as_const(env, py_value) - if result is not NotImplemented: - return result - return NotImplemented - - -################################################################################ -# Partial evaluation -# When the compiler is extracting from a running program, it is likely that -# evaluations produce live values which can be further partially evaluated -# at import time, in the context of the running instance (versus emitting -# program IR to do so). This behavior is controlled through a PartialEvalHook -# on the environment. -################################################################################ - - -class PartialEvalType(Enum): - # Could not be evaluated immediately and the operation should be - # code-generated. yields NotImplemented. - NOT_EVALUATED = 0 - - # Yields a LiveValueRef - YIELDS_LIVE_VALUE = 1 - - # Yields an IR value - YIELDS_IR_VALUE = 2 - - # Evaluation yielded an error (yields contains exc_info from sys.exc_info()). - ERROR = 3 - - -class PartialEvalResult(namedtuple("PartialEvalResult", "type,yields")): - """Encapsulates the result of a partial evaluation.""" - - def as_partial_eval_result(self) -> "PartialEvalResult": - return self - - @staticmethod - def not_evaluated() -> "PartialEvalResult": - return PartialEvalResult(PartialEvalType.NOT_EVALUATED, NotImplemented) - - @staticmethod - def yields_live_value(live_value) -> "PartialEvalResult": - assert isinstance(live_value, LiveValueRef) - return PartialEvalResult(PartialEvalType.YIELDS_LIVE_VALUE, live_value) - - @staticmethod - def yields_ir_value(ir_value: _ir.Value) -> "PartialEvalResult": - assert isinstance(ir_value, _ir.Value) - return PartialEvalResult(PartialEvalType.YIELDS_IR_VALUE, ir_value) - - @staticmethod - def error() -> "PartialEvalResult": - return PartialEvalResult(PartialEvalType.ERROR, sys.exc_info()) - - @staticmethod - def error_message(message: str) -> "PartialEvalResult": - try: - raise UserReportableError(message) - except UserReportableError: - return PartialEvalResult.error() - - -class LiveValueRef: - """Wraps a live value from the containing environment. - - Typically, when expressions encounter a live value, a limited number of - partial evaluations can be done against it in place (versus emitting the code - to import it and perform the operation). This default base class will not - perform any static evaluations. 
- """ - __slots__ = [ - "live_value", - ] - - def __init__(self, live_value): - super().__init__() - self.live_value = live_value - - def as_partial_eval_result(self) -> PartialEvalResult: - return PartialEvalResult.yields_live_value(self) - - def resolve_getattr(self, env: "Environment", - attr_name: str) -> PartialEvalResult: - """Gets a named attribute from the live value.""" - return PartialEvalResult.not_evaluated() - - def resolve_call(self, env: "Environment", args: Sequence[_ir.Value], - keywords: Sequence[str]) -> PartialEvalResult: - """Resolves a function call given 'args' and 'keywords'.""" - return PartialEvalResult.not_evaluated() - - def __repr__(self): - return "LiveValueRef({}, {})".format(self.__class__.__name__, - self.live_value) - - -class PartialEvalHook: - """Hook interface for performing partial evaluation.""" - __slots__ = [] - - def partial_evaluate(self, py_value) -> PartialEvalResult: - raise NotImplementedError - - -################################################################################ -# Configuration and environment -################################################################################ - - -class Configuration: - """Base class providing global configuration objects.""" - __slots__ = [ - "target_factory", - "base_name_resolvers", - "value_coder", - "partial_eval_hook", - ] - - def __init__(self, - *, - target_factory: TargetFactory, - base_name_resolvers: Sequence[NameResolver] = (), - value_coder: Optional[ValueCoder] = None, - partial_eval_hook: PartialEvalHook = None): - super().__init__() - self.target_factory = target_factory - self.base_name_resolvers = tuple(base_name_resolvers) - self.value_coder = value_coder if value_coder else ValueCoderChain(()) - self.partial_eval_hook = partial_eval_hook - - def __repr__(self): - return ("Configuration(target_factory={}, base_name_resolvers={}, " - "value_code={}, partial_eval_hook={})").format( - self.target_factory, self.base_name_resolvers, self.value_coder, - self.partial_eval_hook) - - -class Environment: - """Instantiated configuration for emitting code in a specific context. - - This brings together: - - An instantiated target - - Delegating interfaces for other configuration objects. - - Note that this class does not actually implement most of the delegate - interfaces because it hides the fact that some may require more obtuse - APIs than should be exposed to end callers (i.e. expecting environment or - other config objects). 
- """ - __slots__ = [ - "config", - "ic", - "_name_resolvers", - "target", - ] - - def __init__(self, - *, - config: Configuration, - ic: ImportContext, - name_resolvers: Sequence[NameResolver] = ()): - super().__init__() - self.config = config - self.ic = ic - self.target = config.target_factory(ic) - self._name_resolvers = (tuple(name_resolvers) + - self.config.base_name_resolvers) - - def resolve_name(self, name: str) -> Optional[NameReference]: - for resolver in self._name_resolvers: - ref = resolver.resolve_name(name) - if ref is not None: - return ref - return None - - def partial_evaluate(self, py_value) -> PartialEvalResult: - return self.config.partial_eval_hook.partial_evaluate(py_value) - - def code_py_value_as_const(self, - py_value) -> Union[_NotImplementedType, _ir.Value]: - return self.config.value_coder.code_py_value_as_const(self, py_value) diff --git a/python/npcomp/compiler/numpy/name_resolver_base.py b/python/npcomp/compiler/numpy/name_resolver_base.py deleted file mode 100644 index 3521f4e97..000000000 --- a/python/npcomp/compiler/numpy/name_resolver_base.py +++ /dev/null @@ -1,114 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Name resolvers for common scenarios.""" - -from typing import Optional - -from .interfaces import * - -from ... import ir as _ir - -__all__ = [ - "ConstModuleNameResolver", - "LocalNameResolver", -] - -################################################################################ -# Local name resolution -# This is used for local names that can be managed purely as SSA values. -################################################################################ - - -class LocalNameReference(NameReference): - """Holds an association between a name and SSA value.""" - __slots__ = [ - "_current_value", - ] - - def __init__(self, name, initial_value=None): - super().__init__(name) - self._current_value = initial_value - - def load(self, env: Environment) -> PartialEvalResult: - if self._current_value is None: - return PartialEvalResult.error_message( - "Attempt to access local '{}' before assignment".format(self.name)) - return PartialEvalResult.yields_ir_value(self._current_value) - - def store(self, env: Environment, value: _ir.Value): - self._current_value = value - - def __repr__(self): - return "".format(self.name) - - -class LocalNameResolver(NameResolver): - """Resolves names in a local cache of SSA values. - - This is used to manage locals and arguments (that are not referenced through - a closure). - """ - __slots__ = [ - "_name_refs", - ] - - def __init__(self, names): - super().__init__() - self._name_refs = {name: LocalNameReference(name) for name in names} - - def resolve_name(self, name) -> Optional[NameReference]: - return self._name_refs.get(name) - - -################################################################################ -# Constant name resolution -# For some DSLs, it can be appropriate to treat some containing scopes as -# constants. This strategy typically binds to a module and routes loads -# through the partial evaluation hook. 
-################################################################################ - - -class ConstNameReference(NameReference): - """Represents a name/value mapping that will emit as a constant.""" - __slots__ = [ - "_py_value", - ] - - def __init__(self, name, py_value): - super().__init__(name) - self._py_value = py_value - - def load(self, env: Environment) -> PartialEvalResult: - return env.partial_evaluate(self._py_value) - - def __repr__(self): - return "".format(self.name, self._py_value) - - -class ConstModuleNameResolver(NameResolver): - """Resolves names from a module by treating them as immutable and loading - them as constants into a function scope. - """ - __slots__ = [ - "_as_dict", - "module", - ] - - def __init__(self, module, *, as_dict=False): - super().__init__() - self.module = module - self._as_dict = as_dict - - def resolve_name(self, name) -> Optional[NameReference]: - if self._as_dict: - if name in self.module: - py_value = self.module[name] - else: - return None - else: - try: - py_value = getattr(self.module, name) - except AttributeError: - return None - return ConstNameReference(name, py_value) diff --git a/python/npcomp/compiler/numpy/partial_eval_base.py b/python/npcomp/compiler/numpy/partial_eval_base.py deleted file mode 100644 index c2c69e03a..000000000 --- a/python/npcomp/compiler/numpy/partial_eval_base.py +++ /dev/null @@ -1,144 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Partial evaluation helpers and support for built-in and common scenarios.""" - -from typing import Any, Callable, Union - -from .interfaces import * -from .py_value_utils import * -from ..utils import logging - -__all__ = [ - "MappedPartialEvalHook", - "ResolveAttrLiveValueRef", - "TemplateCallLiveValueRef", -] - -_Unspec = object() - -################################################################################ -# LiveValueRef specializations for various kinds of access -################################################################################ - - -class ResolveAttrLiveValueRef(LiveValueRef): - """Custom LiveValueRef that will resolve attributes via getattr.""" - __slots__ = [] - - def resolve_getattr(self, env: "Environment", attr_name) -> PartialEvalResult: - logging.debug("RESOLVE_GETATTR '{}' on {}".format(attr_name, - self.live_value)) - try: - attr_py_value = getattr(self.live_value, attr_name) - except: - return PartialEvalResult.error() - return env.partial_evaluate(attr_py_value) - - -class TemplateCallLiveValueRef(LiveValueRef): - """Custom LiveValueRef that resolves calls to a func_template_call op.""" - __slots__ = ["callee_name"] - - def __init__(self, callee_name, live_value): - super().__init__(live_value) - self.callee_name = callee_name - - def resolve_call(self, env: "Environment", args, - keywords) -> PartialEvalResult: - linear_args = list(args) - kw_arg_names = [] - for kw_name, kw_value in keywords: - kw_arg_names.append(kw_name) - linear_args.append(kw_value) - - ic = env.ic - result_ir_value = ic.basicpy_FuncTemplateCallOp( - result_type=ic.unknown_type, - callee_symbol=self.callee_name, - args=linear_args, - arg_names=kw_arg_names).result - return PartialEvalResult.yields_ir_value(result_ir_value) - - -################################################################################ -# PartialEvalHook implementations 
-################################################################################ - - -class MappedPartialEvalHook(PartialEvalHook): - """A PartialEvalHook that maps rules to produce live values. - - Internally, this implementation binds a predicate to an action. The predicate - can be: - - A python value matched by reference or value equality - - A type that a value must be an instance of - - An arbitrary lambda (should be limited to special cases as it forces - a linear scan). - - An action can be one of - - A `lambda python_value: PartialEvalResult...` - - An object that supports as_partial_eval_result() (either a - PartialEvalResult or LiveValueRef qualify). - - None to indicate that the python value should be processed directly - """ - __slots__ = [ - "_value_map", - ] - - def __init__(self): - super().__init__() - self._value_map = PyValueMap() - - def __repr__(self): - return "MappedPartialEvalHook({})".format(self._value_map) - - def partial_evaluate(self, py_value) -> PartialEvalResult: - """Performs partial evaluation on a python value.""" - logging.debug("LOOKUP: {}", py_value) - action = self._value_map.lookup(py_value) - if action is None: - # Passthrough. - return PartialEvalResult.yields_live_value(LiveValueRef(py_value)) - # Attempt to call. - try: - result = action(py_value).as_partial_eval_result() - assert isinstance(result, PartialEvalResult), ( - "Expected PartialEvalResult but got {}".format(result)) - logging.debug("PARTIAL EVAL RESOLVE {}: {}", py_value, result) - return result - except: - return PartialEvalResult.error() - - def bind_action(self, - action: Union[PartialEvalResult, LiveValueRef, - Callable[[Any], PartialEvalResult]], - *, - for_ref=_Unspec, - for_type=_Unspec, - for_predicate=_Unspec): - if hasattr(action, "as_partial_eval_result"): - # Registers a casting action. - action = lambda pv: pv.as_partial_eval_result() - - if for_ref is not _Unspec: - self._value_map.bind_reference(for_ref, action) - elif for_type is not _Unspec: - self._value_map.bind_type(for_type, action) - elif for_predicate is not _Unspec: - self._value_map.bind_predicate(for_predicate, action) - else: - raise ValueError( - "Must specify one of 'for_ref', 'for_type' or 'for_predicate") - - def enable_getattr(self, **kwargs): - """Enables partial evaluation of getattr.""" - self.bind_action( - lambda pv: PartialEvalResult.yields_live_value( - ResolveAttrLiveValueRef(pv)), **kwargs) - - def enable_template_call(self, callee_name, **kwargs): - """"Enables a global template call.""" - self.bind_action( - lambda pv: PartialEvalResult.yields_live_value( - TemplateCallLiveValueRef(callee_name, pv)), **kwargs) diff --git a/python/npcomp/compiler/numpy/py_value_utils.py b/python/npcomp/compiler/numpy/py_value_utils.py deleted file mode 100644 index 43ca81ea2..000000000 --- a/python/npcomp/compiler/numpy/py_value_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Utilities for matching and massaging python values.""" - -from collections import namedtuple -import weakref - -_NotMapped = object() - -__all__ = [ - "PyValueMap", -] - - -class HashableReference(namedtuple("HashableReference", "ref_id,referrent")): - - @staticmethod - def create(referrent): - try: - return HashableReference(id(referrent), weakref.ref(referrent)) - except TypeError: - # Fallback to value equality. 
- return HashableReference(0, referrent) - - def __eq__(self, other): - try: - return self.ref_id == other.ref_id and self.referrent == other.referrent - except AttributeError: - return False - - def __hash__(self): - return self.ref_id - - -class PyValueMap: - """Maps between predicates that match python values and bound values. - - Maps to specific references: - >>> class Refable: pass - >>> refable1 = Refable() - >>> refable2 = Refable() - >>> pv = PyValueMap() - >>> pv.bind_reference("unrefable", 1) - >>> pv.bind_reference(refable1, 2) - >>> pv.bind_reference(refable2, 3) - >>> pv.lookup("unrefable") - 1 - >>> pv.lookup("nothere") - >>> pv.lookup(refable2) - 3 - >>> pv.lookup(refable1) - 2 - - Lookup by type: - >>> pv.bind_type(Refable, 4) - >>> pv.bind_type(str, 5) - >>> pv.lookup(refable1) - 2 - >>> pv.lookup(Refable()) - 4 - >>> pv.lookup("nothere") - 5 - >>> pv.lookup(999) - >>> pv.bind_type(int, 6) - >>> pv.lookup(999) - 6 - - Predicate: - >>> pv.lookup(1.2) - >>> pv.bind_predicate(lambda x: x == 1.2, 7) - >>> pv.lookup(1.2) - 7 - """ - __slots__ = [ - "_reference_map", - "_type_map", - "_type_filters", - "_fallback_filters", - "_validator", - ] - - def __init__(self, validator=lambda x: True): - super().__init__() - self._reference_map = dict() # of: dict[HashableReference, Any] - self._type_map = dict() # of: dict[Type, Any|_NotMapped] - self._type_filters = list() # of: list[(Type, Any)] - self._fallback_filters = list() # of: list[(lambda v, Any)] - self._validator = validator - - def __repr__(self): - lines = ["refs={"] - for ref, binding in self._reference_map.items(): - lines.append(" {}: {}".format(ref.referrent, binding)) - lines.append("}, types={") - for t, binding in self._type_filters: - lines.append(" {}: {}".format(t, binding)) - lines.append("}, filters={") - for f, binding in self._fallback_filters: - lines.append(" {}: {}".format(f, binding)) - lines.append("}") - return "\n".join(lines) - - def bind_reference(self, match_value, binding): - assert self._validator(binding), "Illegal binding" - self._reference_map[HashableReference.create(match_value)] = binding - - def bind_type(self, match_type, binding): - assert isinstance(match_type, type) - assert self._validator(binding), "Illegal binding" - self._type_filters.append((match_type, binding)) - self._type_map.clear() # Clears cached bindings - - def bind_predicate(self, predicate, binding): - assert self._validator(binding), "Illegal binding" - self._fallback_filters.append((predicate, binding)) - - def lookup(self, value): - # Check for direct reference equality. - ref = HashableReference.create(value) - binding = self._reference_map.get(ref) - if binding is not None: - return binding - - # Check the cached exact type match. - match_type = type(value) - binding = self._type_map.get(match_type) - if binding is not None: - return None if binding is _NotMapped else binding - - # Lookup by type filter. - for predicate_type, binding in self._type_filters: - if issubclass(match_type, predicate_type): - self._type_map[match_type] = binding - return binding - - # Fallback filters. - for predicate, binding in self._fallback_filters: - if predicate(value): - return binding - - return None diff --git a/python/npcomp/compiler/numpy/target.py b/python/npcomp/compiler/numpy/target.py deleted file mode 100644 index 442549941..000000000 --- a/python/npcomp/compiler/numpy/target.py +++ /dev/null @@ -1,83 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -from typing import * - -from ..utils.mlir_utils import * - -from ... import ir as _ir - -__all__ = [ - "GenericTarget32", - "GenericTarget64", - "Target", - "TargetFactory", -] - - -class Target: - """ - Abstract class providing configuration and hooks for a specific compilation - target. - """ - __slots__ = [ - "ic", - ] - - def __init__(self, ic): - self.ic = ic - - @property - def target_name(self) -> str: - return NotImplementedError() - - @property - def impl_int_type(self) -> _ir.Type: - """Gets the default int type for the backend for the Python 'int' type.""" - raise NotImplementedError() - - @property - def impl_float_type(self) -> _ir.Type: - """Gets the implementation's type for the python 'float' type.""" - raise NotImplementedError() - - -class GenericTarget64(Target): - """A generic 64 bit target.""" - - @property - def target_name(self) -> str: - return "generic64" - - @property - def impl_int_type(self) -> _ir.Type: - """Gets the default int type for the backend for the Python 'int' type.""" - return _ir.IntegerType.get_signless(64, context=self.ic.context) - - @property - def impl_float_type(self) -> _ir.Type: - """Gets the implementation's type for the python 'float' type.""" - return _ir.F64Type.get(context=self.ic.context) - - -class GenericTarget32(Target): - """A generic 32 bit target (uses 32bit ints and floats).""" - - @property - def target_name(self) -> str: - return "generic32" - - @property - def impl_int_type(self) -> _ir.Type: - """Gets the default int type for the backend for the Python 'int' type.""" - return _ir.IntegerType.get_signless(32, context=self.ic.context) - - @property - def impl_float_type(self) -> _ir.Type: - """Gets the implementation's type for the python 'float' type.""" - return _ir.F32Type.get(context=self.ic.context) - - -# Factory for producing a target (matches the Target constructor). -TargetFactory = Callable[[ImportContext], Target] diff --git a/python/npcomp/compiler/numpy/test_config.py b/python/npcomp/compiler/numpy/test_config.py deleted file mode 100644 index c984e22c8..000000000 --- a/python/npcomp/compiler/numpy/test_config.py +++ /dev/null @@ -1,86 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Various configuration helpers for testing.""" - -import ast -import functools - -from .frontend import * -from .interfaces import * -from .partial_eval_base import * -from .target import * -from .value_coder_base import * -from .extensions import numpy as npc -from ..utils import logging - - -def create_import_dump_decorator(*, - target_factory: TargetFactory = GenericTarget64 - ): - config = create_test_config(target_factory=target_factory) - logging.debug("Testing with config: {}", config) - - def do_import(f): - fe = ImportFrontend(config=config) - fe.import_global_function(f) - print("// -----") - print(fe.ir_module.operation.get_asm()) - return f - - def decorator(*args, expect_error=None): - if len(args) == 0: - # Higher order decorator. 
- return functools.partial(decorator, expect_error=expect_error) - - assert len(args) == 1 - try: - return do_import(f=args[0]) - except EmittedError as e: - if expect_error and e.message == expect_error: - print("// EXPECTED_ERROR:", repr(e.message)) - pass - elif expect_error: - print("// MISMATCHED_ERROR:", repr(e.message)) - raise AssertionError("Expected error '{}' but got '{}'".format( - expect_error, e.message)) - else: - print("// UNEXPECTED_ERROR:", repr(e.message)) - raise e - - return decorator - - -def create_test_config(target_factory: TargetFactory = GenericTarget64): - value_coder = ValueCoderChain([ - BuiltinsValueCoder(), - npc.CreateNumpyValueCoder(), - ]) - pe_hook = build_default_partial_eval_hook() - - # Populate numpy partial evaluators. - npc.bind_ufuncs(npc.get_ufuncs_from_module(), pe_hook) - - if logging.debug_enabled: - logging.debug("Partial eval mapping: {}", pe_hook) - - return Configuration(target_factory=target_factory, - value_coder=value_coder, - partial_eval_hook=pe_hook) - - -def build_default_partial_eval_hook() -> PartialEvalHook: - pe = MappedPartialEvalHook() - ### Modules - pe.enable_getattr(for_type=ast.__class__) # The module we use is arbitrary. - - ### Tuples - # Enable attribute resolution on tuple, which includes namedtuple (which is - # really what we want). - pe.enable_getattr(for_type=tuple) - - ### Temp: resolve a function to a template call for testing - import math - pe.enable_template_call("__global$math.ceil", for_ref=math.ceil) - pe.enable_template_call("__global$math.isclose", for_ref=math.isclose) - return pe diff --git a/python/npcomp/compiler/numpy/value_coder_base.py b/python/npcomp/compiler/numpy/value_coder_base.py deleted file mode 100644 index f5a69921f..000000000 --- a/python/npcomp/compiler/numpy/value_coder_base.py +++ /dev/null @@ -1,50 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Value coders for built-in and common scenarios.""" - -from typing import Union - -from .interfaces import * - -from ... 
import ir as _ir -from ...dialects import std as std_ops, basicpy as basicpy_ops - -__all__ = [ - "BuiltinsValueCoder", -] - -_NotImplementedType = type(NotImplemented) - - -class BuiltinsValueCoder(ValueCoder): - """Value coder for builtin python types.""" - __slots__ = [] - - def code_py_value_as_const(self, env: Environment, - py_value) -> Union[_NotImplementedType, _ir.Value]: - ic = env.ic - with ic.loc, ic.ip: - if py_value is True: - return basicpy_ops.BoolConstantOp(ic.bool_type, ic.i1_true).result - elif py_value is False: - return basicpy_ops.BoolConstantOp(ic.bool_type, ic.i1_false).result - elif py_value is None: - return basicpy_ops.SingletonOp(ic.none_type).result - elif isinstance(py_value, int): - ir_type = env.target.impl_int_type - ir_attr = _ir.IntegerAttr.get(ir_type, py_value) - return std_ops.ConstantOp(ir_type, ir_attr).result - elif isinstance(py_value, float): - ir_type = env.target.impl_float_type - ir_attr = _ir.FloatAttr.get(ir_type, py_value) - return std_ops.ConstantOp(ir_type, ir_attr).result - elif isinstance(py_value, str): - return basicpy_ops.StrConstantOp(ic.str_type, - _ir.StringAttr.get(py_value)).result - elif isinstance(py_value, bytes): - return basicpy_ops.BytesConstantOp(ic.bytes_type, - _ir.StringAttr.get(py_value)).result - elif isinstance(py_value, type(...)): - return basicpy_ops.SingletonOp(ic.ellipsis_type).result - return NotImplemented diff --git a/python/npcomp/compiler/pytorch/backend/frontend_lowering.py b/python/npcomp/compiler/pytorch/backend/frontend_lowering.py deleted file mode 100644 index b9371d057..000000000 --- a/python/npcomp/compiler/pytorch/backend/frontend_lowering.py +++ /dev/null @@ -1,60 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import os - -import torch - -from npcomp.ir import * -from npcomp.passmanager import * -from npcomp.compiler.utils import logging - -__all__ = [ - "lower_object_graph", - "lower_module", -] - -def lower_module(imported_module: Module): - """Compiles an imported module, with a flat list of functions. - - Args: - imported_module: The MLIR module consisting of funcs and globals in - the torch dialect. It is lowered in place. - Returns: - The imported_module, for convenience chaining methods. - """ - with imported_module.context as context: - if logging.debug_enabled(): - logging.debug("Initial PyTorch IR:\n{}", imported_module) - # Frontend. - pipeline_str = "torch-globalized-module-to-npcomp-backend-pipeline" - if logging.debug_enabled(): - logging.debug("Running Torch->backend pipeline '{}'", pipeline_str) - pm = PassManager.parse(pipeline_str) - pm.run(imported_module) - if logging.debug_enabled(): - logging.debug("Backend IR:\n{}", imported_module) - return imported_module - -def lower_object_graph(imported_module: Module): - """Lowers an imported module that has TorchScript object graph semantics. - - Args: - imported_module: The MLIR module consisting of IR as imported by the - torch_mlir.import_module. It is lowered in place. - Returns: - The imported_module, for convenience chaining methods. - """ - with imported_module.context as context: - if logging.debug_enabled(): - logging.debug("Initial PyTorch object graph IR:\n{}", imported_module) - - # Object graph lowering. 
- pipeline_str = "torchscript-to-npcomp-backend-pipeline" - if logging.debug_enabled(): - logging.debug( - "Running Torch object graph lowering pipeline '{}'", pipeline_str) - pm = PassManager.parse(pipeline_str) - pm.run(imported_module) - return imported_module diff --git a/python/npcomp/compiler/utils/mlir_utils.py b/python/npcomp/compiler/utils/mlir_utils.py deleted file mode 100644 index f378aa5b2..000000000 --- a/python/npcomp/compiler/utils/mlir_utils.py +++ /dev/null @@ -1,167 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""General utilities for working with MLIR.""" - -from typing import Optional, Tuple - -from ... import ir as _ir -from ...dialects import builtin as builtin_ops -from ..._mlir_libs import _npcomp as _cext - -__all__ = [ - "ImportContext", -] - - -class ImportContext: - """Simple container for things that we update while importing. - - This is also where we stash various helpers to work around awkward/missing - MLIR Python API features. - """ - __slots__ = [ - "context", - "loc", - "module", - "_ip_stack", - - # Cached types. - "unknown_type", - "bool_type", - "bytes_type", - "ellipsis_type", - "i1_type", - "index_type", - "none_type", - "str_type", - "unknown_array_type", - "unknown_tensor_type", - - # Cached attributes. - "i1_true", - "i1_false", - ] - - def __init__(self, context: Optional[_ir.Context]): - self.context = _ir.Context() if not context else context - _cext.register_all_dialects(self.context) - - self.loc = _ir.Location.unknown(context=self.context) # type: _ir.Location - self.module = None # type: Optional[_ir.Module] - self._ip_stack = [] - - # Cache some types and attributes. - with self.context: - # Types. - # TODO: Consolidate numpy.any_dtype and basicpy.UnknownType. - self.unknown_type = _ir.Type.parse("!basicpy.UnknownType") - self.bool_type = _ir.Type.parse("!basicpy.BoolType") - self.bytes_type = _ir.Type.parse("!basicpy.BytesType") - self.ellipsis_type = _ir.Type.parse("!basicpy.EllipsisType") - self.none_type = _ir.Type.parse("!basicpy.NoneType") - self.str_type = _ir.Type.parse("!basicpy.StrType") - self.i1_type = _ir.IntegerType.get_signless(1) - self.index_type = _ir.IndexType.get() - self.unknown_tensor_type = _ir.UnrankedTensorType.get(self.unknown_type, - loc=self.loc) - self.unknown_array_type = _cext.shaped_to_ndarray_type( - self.unknown_tensor_type) - - # Attributes. - self.i1_true = _ir.IntegerAttr.get(self.i1_type, 1) - self.i1_false = _ir.IntegerAttr.get(self.i1_type, 0) - - def set_file_line_col(self, file: str, line: int, col: int): - self.loc = _ir.Location.file(file, line, col, context=self.context) - - def push_ip(self, new_ip: _ir.InsertionPoint): - self._ip_stack.append(new_ip) - - def pop_ip(self): - assert self._ip_stack, "Mismatched push_ip/pop_ip: stack is empty on pop" - del self._ip_stack[-1] - - @property - def ip(self): - assert self._ip_stack, "InsertionPoint requested but stack is empty" - return self._ip_stack[-1] - - def insert_before_terminator(self, block: _ir.Block): - self.push_ip(_ir.InsertionPoint.at_block_terminator(block)) - - def insert_end_of_block(self, block: _ir.Block): - self.push_ip(_ir.InsertionPoint(block)) - - def FuncOp(self, name: str, func_type: _ir.Type, - create_entry_block: bool) -> Tuple[_ir.Operation, _ir.Block]: - """Creates a |func| op. 
- - Returns: - (operation, entry_block) - """ - assert self.loc is not None - # TODO: Fix upstream FuncOp constructor to not require an implicit - # context. - with self.context: - func = builtin_ops.FuncOp(name, func_type, loc=self.loc, ip=self.ip) - entry_block = None - if create_entry_block: - entry_block = func.add_entry_block() - return (func, entry_block) - - def basicpy_ExecOp(self): - """Creates a basicpy.exec op. - - Returns: - Insertion point to the body. - """ - op = _ir.Operation.create("basicpy.exec", - regions=1, - ip=self.ip, - loc=self.loc) - b = op.regions[0].blocks.append() - return _ir.InsertionPoint(b) - - def basicpy_FuncTemplateCallOp(self, result_type, callee_symbol, args, - arg_names): - with self.loc, self.ip: - attributes = { - "callee": - _ir.FlatSymbolRefAttr.get(callee_symbol), - "arg_names": - _ir.ArrayAttr.get([_ir.StringAttr.get(n) for n in arg_names]), - } - op = _ir.Operation.create("basicpy.func_template_call", - results=[result_type], - operands=args, - attributes=attributes, - ip=self.ip) - return op - - def scf_IfOp(self, results, condition: _ir.Value, with_else_region: bool): - """Creates an SCF if op. - - Returns: - (if_op, then_ip, else_ip) if with_else_region, otherwise (if_op, then_ip) - """ - op = _ir.Operation.create("scf.if", - results=results, - operands=[condition], - regions=2 if with_else_region else 1, - loc=self.loc, - ip=self.ip) - then_region = op.regions[0] - then_block = then_region.blocks.append() - if with_else_region: - else_region = op.regions[1] - else_block = else_region.blocks.append() - return op, _ir.InsertionPoint(then_block), _ir.InsertionPoint(else_block) - else: - return op, _ir.InsertionPoint(then_block) - - def scf_YieldOp(self, operands): - return _ir.Operation.create("scf.yield", - operands=operands, - loc=self.loc, - ip=self.ip) diff --git a/python/npcomp/decorators.py b/python/npcomp/decorators.py deleted file mode 100644 index 6f19c9f0a..000000000 --- a/python/npcomp/decorators.py +++ /dev/null @@ -1,3 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception diff --git a/python/npcomp/dialects/BasicpyBind.td b/python/npcomp/dialects/BasicpyBind.td deleted file mode 100644 index e8d2f2764..000000000 --- a/python/npcomp/dialects/BasicpyBind.td +++ /dev/null @@ -1,15 +0,0 @@ -//===-- BasicpyBind.td - Basicpy dialect bind --------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_PYTHON_DIALECTS_BASICPY_BIND -#define NPCOMP_PYTHON_DIALECTS_BASICPY_BIND - -include "mlir/Bindings/Python/Attributes.td" -include "npcomp/Dialect/Basicpy/IR/BasicpyOps.td" - -#endif diff --git a/python/npcomp/dialects/NumpyBind.td b/python/npcomp/dialects/NumpyBind.td deleted file mode 100644 index 97bc10280..000000000 --- a/python/npcomp/dialects/NumpyBind.td +++ /dev/null @@ -1,15 +0,0 @@ -//===-- NumpyOps.td - Numpy dialect bind -------------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef NPCOMP_PYTHON_DIALECTS_NUMPY_BIND -#define NPCOMP_PYTHON_DIALECTS_NUMPY_BIND - -include "mlir/Bindings/Python/Attributes.td" -include "npcomp/Dialect/Numpy/IR/NumpyOps.td" - -#endif diff --git a/python/npcomp/dialects/basicpy.py b/python/npcomp/dialects/basicpy.py deleted file mode 100644 index d5c29c42f..000000000 --- a/python/npcomp/dialects/basicpy.py +++ /dev/null @@ -1,5 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -from ._basicpy_ops_gen import * diff --git a/python/npcomp/dialects/numpy.py b/python/npcomp/dialects/numpy.py deleted file mode 100644 index 0aabfdd93..000000000 --- a/python/npcomp/dialects/numpy.py +++ /dev/null @@ -1,5 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -from ._numpy_ops_gen import * diff --git a/python/npcomp/exporter.py b/python/npcomp/exporter.py deleted file mode 100644 index 2d5921c8f..000000000 --- a/python/npcomp/exporter.py +++ /dev/null @@ -1,284 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import inspect -import numpy as np -from typing import Optional - -from npcomp.types import * - -__all__ = [ - "Exporter", - "ExportFunction", - "ExportPyFunction", -] - - -def _value_type_from_annotation(annotation): - # TODO: This is just enough to recognize ndarrays. - if annotation is np.ndarray: - return ValueType(TypeClass.NdArray) - else: - return ValueType() - - -def _signature_from_pyfunc(pyfunc): - pysig = inspect.signature(pyfunc) - sig = Signature(len(pysig.parameters)) - # Arguments - for i, param in enumerate(pysig.parameters.values()): - if param.kind not in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD): - raise ValueError( - "Currently only positional function signature are supported") - - sig.arg_names[i] = param.name - annot = param.annotation - if annot is param.empty: - continue - sig.args[i] = _value_type_from_annotation(annot) - - # Result - if pysig.return_annotation is not pysig.empty: - sig.result = _value_type_from_annotation(pysig.return_annotation) - - return sig - - -class ExportFunction: - """Base class for functions that can be exported.""" - __slots__ = ["_sig"] - - def __init__(self, sig=None): - self._sig = sig if sig else Signature() - - @property - def sig(self): - return self._sig - - def __repr__(self): - return "def %r" % self._sig - - -class ExportPyFunction(ExportFunction): - """Wraps a fully specialized python function that is staged for export. - - At different phases of compilation, the wrapped function will be - treated differently. At the initial phase, it is just a pass-through - and provides introspection capabilities. - - Basic access: - >>> def simple(a, b): return a + b - >>> ExportPyFunction(simple) - pydef simple(a: Any, b: Any) -> Any - >>> def mul(a: np.ndarray, b: np.ndarray) -> np.ndarray: - ... 
return a * b - >>> ExportPyFunction(mul) - pydef mul(a: NdArray, b: NdArray) -> NdArray - >>> ExportPyFunction(mul).sig - (a: NdArray, b: NdArray) -> NdArray - - Manipulating the signature: - >>> f = ExportPyFunction(mul) - >>> f.sig.args["a"] += Rank(2) - >>> f.sig.args["b"] = "Any" - >>> f.sig.result += Shape(1, 2) - >>> f - pydef mul(a: NdArray[Rank(2)], b: Any) -> NdArray[Shape(1, 2)] - """ - __slots__ = ExportFunction.__slots__ + ["_pyfunc", "__name__"] - - def __init__(self, pyfunc, name=None): - super().__init__(sig=_signature_from_pyfunc(pyfunc)) - assert (hasattr(pyfunc, "__call__") and - hasattr(pyfunc, "__name__")), "Not a python function" - self._pyfunc = pyfunc - self.__name__ = name if name else pyfunc.__name__ - - @property - def pyfunc(self): - return self._pyfunc - - def __repr__(self): - return "pydef %s%r" % (self.__name__, self._sig) - - def __call__(self, *args, **kwargs): - return self._pyfunc(*args, **kwargs) - - -class _ExpandoNode: - """Expando object that can be indexed into to construct a namespace.""" - __slots__ = [ - "_parent", "_services", "_local_name", "_parent_name", "_children", - "_attached" - ] - - def __init__(self, parent: Optional["_ExpandoNode"], services: "_Services", - local_name: str): - super().__init__() - object.__setattr__(self, "_parent", parent) - object.__setattr__(self, "_services", services) - object.__setattr__(self, "_local_name", local_name) - object.__setattr__(self, "_parent_name", - parent._get_full_name() if parent else "") - object.__setattr__(self, "_children", {}) - object.__setattr__(self, "_attached", parent is None) - - def _attach(self): - if self._attached: - return - if self._local_name in self._parent._children: - raise KeyError("Cannot re-assign '%s'" % (self._get_full_name(),)) - self._parent._attach() - self._parent._children[self._local_name] = self - object.__setattr__(self, "_attached", True) - - def _get_full_name(self): - if not self._parent: - return "" # Root is always empty name. - full_name = (self._parent_name + "." + - self._local_name if self._parent_name else self._local_name) - return full_name - - def _get_child_name(self, child_local_name): - full_name = self._get_full_name() - if not full_name: - return child_local_name - else: - return full_name + "." + child_local_name - - def __repr__(self): - return "Namespace(\"%s\")" % (self._get_full_name()) - - def __contains__(self, key): - return key in self._children - - def __getitem__(self, key): - key = str(key) - existing = self._children.get(key) - if existing is not None: - return existing - # Speculatively create a child expando. - child = _ExpandoNode(self, self._services, key) - return child - - def __setitem__(self, key, value): - if not inspect.isfunction(value): - raise TypeError("Cannot assign value to an exporter: %r" % (value,)) - child_name = self._get_child_name(key) - if key in self._children: - # TODO: Relax this once __delitem__ is implemented. - raise KeyError("Cannot re-assign '%s'" % (child_name)) - self._attach() - self._children[key] = self._services.wrap_function(value, child_name) - - def __getattr__(self, name): - return self[name] - - def __setattr__(self, name, value): - try: - self[name] = value - except KeyError as e: - raise AttributeError(str(e)) from None - - def __dir__(self): - return self._children.keys() - - -class _Services: - """Services and support for the Exporter. - - Exporters are user objects, so most of the functional components are - contained in the associated _Services object. 
- """ - - def wrap_function(self, f, full_name): - if isinstance(f, ExportFunction): - return f - # TODO: Need to scan through providers and choose. - return ExportPyFunction(f, name=full_name) - - -class Exporter: - """Top-level UI object for assembling a program for export. - - The exporter defines an open namespace of functions to be exported. - Logically, it can be thought of as a dict-of-dicts that is populated - by assignment of functions to leaves. The act of assigning a function - captures it as an ExportFunction and binds it to the exporter. This - ExportFunction exposes the object model that can be manipulated to - refine the compiled form. By default, any calls to such functions will - delegate to the original function, capturing examples that constrain - and allow further optimizations on the compiled form. - - There are several reserved names that can not have functions bound - to them with the dot notation, but can still be referenced by subscripting - if necessary: - TODO: Reserved names. 'captures', etc. - - >>> exp = Exporter() - >>> exp - Exporter() - - Creating namespaces and functions with attribute access: - >>> exp = Exporter() - >>> exp.ns1 - Namespace("ns1") - >>> "ns1" in exp # Not yet attached - False - >>> exp.ns1.ns2.f = lambda x: x - >>> exp.ns1.ns2 # Should be attached - Namespace("ns1.ns2") - >>> exp.ns1.ns2.f - pydef ns1.ns2.f(x: Any) -> Any - - Via index access: - >>> exp = Exporter() - >>> exp["ns1"]["f"] = lambda x: x - >>> dir(exp["ns1"]) - ['f'] - >>> exp["ns1"]["f"] - pydef ns1.f(x: Any) -> Any - - Illegal access: - >>> exp = Exporter() - >>> exp.ns1.ns2.f = lambda x: x - >>> exp.ns1.ns2.f = lambda x: x - Traceback (most recent call last): - ... - AttributeError: "Cannot re-assign 'ns1.ns2.f'" - >>> exp.ns1 = lambda x: x - Traceback (most recent call last): - ... - AttributeError: "Cannot re-assign 'ns1'" - """ - __slots__ = ["_root", "_services"] - - def __init__(self): - super().__init__() - services = _Services() - object.__setattr__(self, "_root", _ExpandoNode(None, services, "")) - object.__setattr__(self, "_services", services) - - def __repr__(self): - return "Exporter()" - - def __contains__(self, key): - return key in self._root - - def __getitem__(self, key): - return self._root[key] - - def __setitem__(self, key, value): - self._root[key] = value - - def __getattr__(self, name): - return getattr(self._root, name) - - def __setattr__(self, name, value): - setattr(self._root, name, value) - - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/python/npcomp/frontends/__init__.py b/python/npcomp/frontends/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python/npcomp/frontends/pytorch/__init__.py b/python/npcomp/frontends/pytorch/__init__.py deleted file mode 100644 index f60a7a72b..000000000 --- a/python/npcomp/frontends/pytorch/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- Python -*- -# This file is licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import torch -import _torch_mlir -from _torch_mlir import _get_mlir -from _torch_mlir import _op_report -from _torch_mlir import _liveness_report -from _torch_mlir import set_debug -from _torch_mlir import lower_to_std - -import json - -_torch_mlir._initialize_aten_bindings() -_torch_mlir.set_debug(False, "") - - -def get_mlir(t): - if not isinstance(t, list): - t = [t] - return _get_mlir(t) - - -def op_report(mlir): - return json.loads(_op_report(mlir)) - - -def liveness_report(mlir): - return json.loads(_liveness_report(mlir)) - - -def get_mlir_supported_devices(devkind=None): - # TODO: define our own device and stop hijacking the xla device. - return ["xla:0"] - - -def mlir_device(devkind=None): - devices = get_mlir_supported_devices(devkind=devkind) - device = devices[0] - return torch.device(device) - - -__all__ = ['get_mlir', 'mlir_device', 'op_report', 'liveness_report'] diff --git a/python/npcomp/frontends/pytorch/core/__init__.py b/python/npcomp/frontends/pytorch/core/__init__.py deleted file mode 100644 index a03d4c390..000000000 --- a/python/npcomp/frontends/pytorch/core/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -*- Python -*- -# This file is licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception diff --git a/python/npcomp/frontends/pytorch/core/aten_mlir_model.py b/python/npcomp/frontends/pytorch/core/aten_mlir_model.py deleted file mode 100644 index fad270c11..000000000 --- a/python/npcomp/frontends/pytorch/core/aten_mlir_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# -*- Python -*- -# This file is licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import torch -from npcomp.frontends.pytorch import * diff --git a/python/npcomp/frontends/pytorch/test/__init__.py b/python/npcomp/frontends/pytorch/test/__init__.py deleted file mode 100644 index 4e3a2c2a9..000000000 --- a/python/npcomp/frontends/pytorch/test/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# -*- Python -*- -# This file is licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -from .test_infrastructure import * diff --git a/python/npcomp/frontends/pytorch/test/test_infrastructure.py b/python/npcomp/frontends/pytorch/test/test_infrastructure.py deleted file mode 100644 index 472aa7413..000000000 --- a/python/npcomp/frontends/pytorch/test/test_infrastructure.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- Python -*- -# This file is licensed under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import npcomp.frontends.pytorch as torch_mlir -import copy - - -def compare(a, b, test): - print("Computing:" + test) - err = (a.to('cpu') - b.to('cpu')).abs().max() - if (err <= 1e-5): - print("PASS! " + test + " check") - else: - print("FAILED " + test + " check") - - -def compare_eq(a, b, test): - print("Computing:" + test) - if (a == b): - print("PASS! 
" + test + " check") - else: - print("FAILED " + test + " check") - - -def check_fwd(model, tensor): - device = torch_mlir.mlir_device() - result = model(tensor) - device_model = copy.deepcopy(model).to(device) - device_tensor = tensor.clone().to(device) - device_result = device_model(device_tensor) - - compare(result, device_result, "fwd") - return (device_model, device_result, result) - - -def check_ref(model, tensor): - return check_fwd(model, tensor) - - -def check_back(fwd_path, target, lossmodel): - device = torch_mlir.mlir_device() - (device_model, device_result, result) = fwd_path - device_target = target.clone().to(device) - ref_loss = lossmodel(result, target) - ref_loss.backward() - device_loss = lossmodel(device_result, device_target) - device_loss.backward() - - compare(ref_loss, device_loss, "back") - return (device_model, device_result) diff --git a/python/npcomp/smoketest.py b/python/npcomp/smoketest.py deleted file mode 100644 index 7569ed670..000000000 --- a/python/npcomp/smoketest.py +++ /dev/null @@ -1,2 +0,0 @@ -import _npcomp -print(_npcomp.__doc__) diff --git a/python/npcomp/torch/__init__.py b/python/npcomp/torch/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python/npcomp/types.py b/python/npcomp/types.py deleted file mode 100644 index 84ac3d8f9..000000000 --- a/python/npcomp/types.py +++ /dev/null @@ -1,710 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -from collections import namedtuple -from enum import Enum - -import numpy as np - -__all__ = [ - "Unspec", - "ArrayConstraint", - "ArrayParams", - "DType", - "DimFlag", - "DimFlagEnum", - "DynamicDim", - "Rank", - "Shape", - "Signature", - "TypeClass", - "TypeConstraints", - "ValueType", -] - -# TODO: All supported types -_DTYPE_TO_ASM_DICT = { - np.bool: "i1", # TODO: May need a custom type to signify 8bit storage - np.int8: "s8", - np.int16: "s16", - np.int32: "s32", - np.int64: "s64", - np.float32: "f32", - np.float64: "f64", -} - - -def _dtype_to_mlir_asm(dtype): - return _DTYPE_TO_ASM_DICT.get(dtype) - - -class _LiterateEnum(Enum): - """An enum that can be parsed/printed based on its name. - - >>> class SampleEnum(_LiterateEnum): - ... Red = 1 - ... Blue = 2 - >>> SampleEnum.Red - Red - >>> SampleEnum.parse("Red") - Red - >>> SampleEnum.parse("Mauve") - Traceback (most recent call last): - ... - ValueError: Cannot parse SampleEnum 'Mauve' - >>> SampleEnum.parse("parse") - Traceback (most recent call last): - ... - ValueError: Cannot parse SampleEnum 'parse' - >>> SampleEnum.parse(None) - Traceback (most recent call last): - ... - ValueError: Cannot parse SampleEnum None - >>> SampleEnum.parse(1.0) - Traceback (most recent call last): - ... - ValueError: Cannot parse SampleEnum 1.0 - - """ - - @classmethod - def parse(cls, v): - if isinstance(v, cls): - return v - if not v or not isinstance(v, str) or v[0] == '_' or not hasattr(cls, v): - raise ValueError("Cannot parse %s %r" % ( - cls.__name__.split(".")[-1], - v, - )) - value = getattr(cls, v) - if not isinstance(value, cls): - raise ValueError("Cannot parse %s %r" % ( - cls.__name__.split(".")[-1], - v, - )) - return value - - def __repr__(self): - return self.name - - -# Special "unspecified" value that we use throughout. 
-class _Unspec: - __slots__ = [] - - def __str__(self): - return "Unspec" - - def __repr__(self): - return "Unspec" - - -Unspec = _Unspec() - - -class TypeClass(_LiterateEnum): - """Top level types in the npcomp language.""" - Any = 0 - NdArray = 1 - - -class ValueType: - """The type a value can take in the npcomp language. - - Types of values in npcomp are always being refined and are therefore - mutable. Instances represent the type derived for a single value, not a - concept of "typeness" generally. - - >>> ValueType() - Any - >>> ValueType('NdArray') - NdArray - >>> ValueType('NdArray', DType(np.float32), Rank(2)) - NdArray[DType(float32), Rank(2)] - >>> vt = ValueType('NdArray') - >>> vt += Rank(3) - >>> vt += DynamicDim(1) - >>> vt - NdArray[Rank(3), DimFlag(Dynamic, (1,))] - >>> vt = ValueType() - >>> vt.type_class = 'NdArray' - >>> vt - NdArray - """ - __slots__ = ["_constraints", "_type_class"] - - def __init__(self, type_class=TypeClass.Any, *constraints): - super().__init__() - self._type_class = TypeClass.parse(type_class) - self._constraints = TypeConstraints(constraints) - - def __iadd__(self, constraint): - assert isinstance( - constraint, TypeConstraint), ("Can only add constraints to a ValueType") - self._constraints.append(constraint) - return self - - def __repr__(self): - if not self._constraints: - return repr(self._type_class) - return "%r[%s]" % (self._type_class, ", ".join( - [repr(c) for c in self._constraints])) - - @property - def type_class(self): - return self._type_class - - @type_class.setter - def type_class(self, type_class): - self._type_class = TypeClass.parse(type_class) - - @property - def constraints(self): - return self._constraints - - -class ValueTypeList: - """Models a list of ValueTypes. - - >>> v3 = ValueTypeList(3) - >>> v3 - (Any, Any, Any) - >>> v3[1] - Any - >>> v3[2] = 'NdArray' - >>> v3 - (Any, Any, NdArray) - >>> v3[2] += Rank(2) - >>> v3 - (Any, Any, NdArray[Rank(2)]) - - With names: - >>> v3 = ValueTypeList(3, [None, "b", None]) - >>> v3[1] = 'NdArray' - >>> v3["b"] - NdArray - >>> v3["b"] = 'Any' - >>> v3 - (Any, Any, Any) - """ - __slots__ = ["_list", "_names"] - - def __init__(self, arity=0, names=None): - self._list = [ValueType() for _ in range(arity)] - self._names = names - - def _key_to_index(self, key): - if isinstance(key, str): - # Scan for the index. - if self._names: - for i, n in enumerate(self._names): - if n == key: - return i - raise KeyError("Unknown key '%s'" % key) - return key - - def __getitem__(self, key): - return self._list[self._key_to_index(key)] - - def __setitem__(self, key, value): - if not isinstance(value, ValueType): - value = ValueType(value) - self._list[self._key_to_index(key)] = value - - def __iter__(self): - return self._list.__iter__() - - def __repr__(self): - return "(%s)" % (", ".join(repr(t) for t in self._list),) - - -class Signature: - """A function signature. - - This currently only models a linear list of positional arguments and - assumes that multiple results will be represented by some form of tuple - type. 
- - >>> Signature() - () -> Any - >>> Signature(2) - (Any, Any) -> Any - >>> s = Signature(2) - >>> s.args[1] = 'NdArray' - >>> s.args[1] += Rank(2) - >>> s - (Any, NdArray[Rank(2)]) -> Any - >>> s.result = 'NdArray' - >>> s.result += Rank(3) - >>> s - (Any, NdArray[Rank(2)]) -> NdArray[Rank(3)] - >>> s.arg_names[0] = 'a' - >>> s.arg_names[1] = 'b' - >>> s - (a: Any, b: NdArray[Rank(2)]) -> NdArray[Rank(3)] - """ - __slots__ = ["_args", "_arg_names", "_result"] - - def __init__(self, arity=0): - super().__init__() - self._result = ValueType() - self._arg_names = [None] * arity - self._args = ValueTypeList(arity, names=self._arg_names) - - @property - def args(self): - return self._args - - @property - def arg_names(self): - return self._arg_names - - @property - def result(self): - return self._result - - @result.setter - def result(self, value): - if not isinstance(value, ValueType): - value = ValueType(value) - self._result = value - - def __repr__(self): - args_repr = "(%s)" % (", ".join( - ((n + ": " + repr(t)) if n else repr(t)) - for t, n in zip(self._args, self._arg_names)),) - return "%s -> %r" % (args_repr, self._result) - - -class ArrayParams: - """Represents parameters defining how to construct an array. - - >>> ArrayParams() - ArrayParams(dtype=Unspec) - >>> ArrayParams(np.float32) - ArrayParams(dtype=float32) - >>> ArrayParams(np.float32, rank=4) - ArrayParams(dtype=float32, shape=(-1, -1, -1, -1)) - >>> ArrayParams(np.float32, shape=(1, 2, 3)) - ArrayParams(dtype=float32, shape=(1, 2, 3)) - """ - __slots__ = ["dtype", "shape"] - - def __init__(self, dtype=Unspec, shape=Unspec, rank=Unspec): - self.dtype = dtype - if shape is not Unspec: - self.shape = shape - elif rank is not Unspec: - self.shape = [-1 for _ in range(rank)] - else: - self.shape = Unspec - - @property - def rank(self): - if self.shape is Unspec: - return Unspec - return len(self.shape) - - @classmethod - def from_constraints(cls, constraints): - """Constructs params for a TypeConstraints list. - - Unconstrained: - >>> ArrayParams.from_constraints(TypeConstraints()) - ArrayParams(dtype=Unspec) - - DType constrained: - >>> ArrayParams.from_constraints(TypeConstraints(DType(np.float32))) - ArrayParams(dtype=float32) - - Rank constrained: - >>> ArrayParams.from_constraints(TypeConstraints(Rank(2))) - ArrayParams(dtype=Unspec, shape=(-1, -1)) - - Shape constrained: - >>> ArrayParams.from_constraints(TypeConstraints(Shape(1, 2, 3))) - ArrayParams(dtype=Unspec, shape=(1, 2, 3)) - >>> ArrayParams.from_constraints(TypeConstraints( - ... Rank(3), Shape(1, 2, 3))) - ArrayParams(dtype=Unspec, shape=(1, 2, 3)) - - Shape constrained with dynamic dim constraint: - >>> ArrayParams.from_constraints(TypeConstraints( - ... Shape(1, 2, 3), DynamicDim(1))) - ArrayParams(dtype=Unspec, shape=(1, -1, 3)) - >>> ArrayParams.from_constraints(TypeConstraints( - ... Shape(1, 2, 3), DynamicDim((0, 2)))) - ArrayParams(dtype=Unspec, shape=(-1, 2, -1)) - - Errors: - >>> ArrayParams.from_constraints(TypeConstraints( - ... Rank(4), Shape(1, 2, 3))) - Traceback (most recent call last): - ... - ValueError: Conflicting shape and rank: Rank(4) vs Shape(1, 2, 3) - >>> ArrayParams.from_constraints(TypeConstraints( - ... Shape(1, 2, 3), DynamicDim((0, 5)))) - Traceback (most recent call last): - ... - ValueError: Out of range DimFlag(Dynamic, (0, 5)) for shape [-1, 2, 3] - """ - # TODO: Should have a 'canonicalize' method on TypeConstraints which - # reduces and verifies. 
- dtype_c = constraints.one_of(DType) - shape_c = constraints.one_of(Shape) - rank_c = constraints.one_of(Rank) - dim_flags = constraints.all_of(DimFlag) - - dtype = dtype_c.dtype if dtype_c else Unspec - shape = Unspec - - # Compute shape - if shape_c: - # TODO: Should be in canonicalizer - if rank_c and rank_c.rank != len(shape_c.dims): - raise ValueError("Conflicting shape and rank: %r vs %r" % - (rank_c, shape_c)) - shape = list(shape_c.dims) - elif rank_c: - shape = [-1 for _ in range(rank_c.rank)] - - # Apply dim flags - if shape is not Unspec and dim_flags: - for df in dim_flags: - flag, for_dims = df.dim_flag - for d in for_dims: - if d < 0 or d >= len(shape): - raise ValueError("Out of range %r for shape %r" % (df, shape)) - if flag == DimFlagEnum.Dynamic: - shape[d] = -1 - - return cls(dtype=dtype, shape=shape) - - def __repr__(self): - try: - s = "ArrayParams(dtype=%s" % (self.dtype.__name__ if isinstance( - self.dtype, type) else self.dtype,) - if self.shape is not Unspec: - s += ", shape=%r" % (tuple(self.shape),) - s += ")" - return s - except: - return "ArrayParams(ERROR)" - - @property - def is_concrete(self): - """Returns true if the parameters are sufficient to construct an ndarray. - - >>> ArrayParams().is_concrete - False - >>> ArrayParams(dtype=np.float32).is_concrete - False - >>> ArrayParams(dtype=np.float32, rank=1).is_concrete - False - >>> ArrayParams(dtype=np.float32, shape=(1, 2)).is_concrete - True - """ - if self.dtype is Unspec: - return False - if self.shape is Unspec: - return False - if any(d < 0 for d in self.shape): - return False - return True - - @property - def mlir_tensor_type_asm(self): - """Get a corresponding MLIR tensor type. - - Fully Unspecified: - >>> ArrayParams().mlir_tensor_type_asm - 'tensor<*x!numpy.any_dtype>' - - Unranked: - >>> ArrayParams(dtype=np.float32).mlir_tensor_type_asm - 'tensor<*xf32>' - - Ranked: - >>> ArrayParams(dtype=np.float32, rank=3).mlir_tensor_type_asm - 'tensor<?x?x?xf32>' - >>> ArrayParams(dtype=np.float32, shape=(-1, -1)).mlir_tensor_type_asm - 'tensor<?x?xf32>' - - Scalar: - >>> ArrayParams(dtype=np.float32, rank=0).mlir_tensor_type_asm - 'tensor<f32>' - >>> ArrayParams(dtype=np.float32, shape=()).mlir_tensor_type_asm - 'tensor<f32>' - - Shaped: - >>> ArrayParams(dtype=np.float32, shape=(2, 3)).mlir_tensor_type_asm - 'tensor<2x3xf32>' - >>> ArrayParams(dtype=np.float32, shape=(-1, 3)).mlir_tensor_type_asm - 'tensor<?x3xf32>' - """ - if self.dtype is Unspec: - dtype_asm = "!numpy.any_dtype" - else: - dtype_asm = _dtype_to_mlir_asm(self.dtype) - if not dtype_asm: - raise ValueError("Unsupported MLIR tensor element type %r" % - (self.dtype,)) - if self.shape is Unspec: - shape_asm = "*" - else: - shape_asm = "x".join((str(d) if d >= 0 else "?") for d in self.shape) - if shape_asm: - shape_asm += "x" - return "tensor<%s%s>" % (shape_asm, dtype_asm) - - def new_ndarray(self): - """Creates a new ndarray from these params. - - >>> ArrayParams().new_ndarray() - Traceback (most recent call last): - ... - ValueError: ArrayParams(dtype=Unspec) is not concrete - >>> (ArrayParams(np.float32, (1, 2)).new_ndarray() * 0.0 + 1.0) * 0.0 - array([[0., 0.]], dtype=float32) - """ - if not self.is_concrete: - raise ValueError("%r is not concrete" % (self,)) - return np.ndarray(dtype=self.dtype, shape=self.shape) - - -class TypeConstraint: - """Base class for type constraints.""" - pass - - -class TypeConstraints(list): - """Collection of type constraints. 
- - >>> TypeConstraints([DynamicDim()]) - TypeConstraints(DimFlag(Dynamic, Unspec)) - >>> TypeConstraints([DynamicDim(), Rank(4)]) - TypeConstraints(DimFlag(Dynamic, Unspec), Rank(4)) - >>> TypeConstraints(DynamicDim(), Rank(4)) - TypeConstraints(DimFlag(Dynamic, Unspec), Rank(4)) - >>> TypeConstraints(Rank(4)) - TypeConstraints(Rank(4)) - >>> TypeConstraints("foobar") - Traceback (most recent call last): - ... - AssertionError - """ - - def __init__(self, *constraints): - if len(constraints) == 1 and not isinstance(constraints[0], - ArrayConstraint): - constraints = constraints[0] - super().__init__(constraints) - assert (all(isinstance(c, ArrayConstraint) for c in self)) - - def __repr__(self): - return "TypeConstraints(%s)" % (", ".join([repr(c) for c in self])) - - def all_of(self, clazz): - """Finds all of the given class.""" - return [c for c in self if isinstance(c, clazz)] - - def one_of(self, clazz): - """Finds at most one constraint of the given class.""" - found = [c for c in self if isinstance(c, clazz)] - if not found: - return None - if len(found) > 1: - raise ValueError("Conflicting constraints. Expected one of %r. Got %r" % - (clazz, found)) - return found[0] - - -class ArrayConstraint(TypeConstraint): - """Base class for a constraint on an array's characteristics.""" - - def implies_dtype(self): - return False - - @property - def dtype(self): - raise NotImplementedError() - - def implies_rank(self): - return False - - @property - def rank(self): - raise NotImplementedError() - - def implies_dims(self): - return False - - @property - def dims(self): - raise NotImplementedError() - - def implies_dim_flag(self): - return False - - @property - def dim_flag(self): - raise NotImplementedError() - - -class DType(ArrayConstraint): - """A constraint on a dtype. - - DType constraints are exclusive with only one permitted in a set. - - >>> DType(np.float32) - DType(float32) - >>> DType("foobar") - Traceback (most recent call last): - ... - AssertionError - """ - __slots__ = ["_dtype"] - - def __init__(self, dtype): - super().__init__() - assert isinstance(dtype, type) - self._dtype = dtype - - @property - def dtype(self): - return self._dtype - - def implies_dtype(self): - return True - - def __repr__(self): - return "DType(%s)" % (self._dtype.__name__,) - - -class Rank(ArrayConstraint): - """Establishes a fixed rank for the array. - - >>> Rank(1) - Rank(1) - >>> Rank(0) - Rank(0) - >>> Rank(-1) - Traceback (most recent call last): - ... - AssertionError - >>> Rank("foobar") - Traceback (most recent call last): - ... - AssertionError - - """ - __slots__ = ["_rank"] - - def __init__(self, rank): - super().__init__() - assert (isinstance(rank, int) and rank >= 0) - self._rank = rank - - @property - def rank(self): - return self._rank - - def implies_rank(self): - return True - - def __repr__(self): - return "Rank(%d)" % (self._rank) - - -class Shape(ArrayConstraint): - """Establishes a static shape for an array. - - All dimensions must be a non-negative integer or Unspec. - - >>> Shape(1, 2, 3) - Shape(1, 2, 3) - >>> Shape(Unspec, 1) - Shape(Unspec, 1) - >>> Shape() - Shape() - >>> Shape(-1, 1) - Traceback (most recent call last): - ... 
- AssertionError - """ - __slots__ = ["_dims"] - - def __init__(self, *dims): - super().__init__() - assert (all(d is Unspec or (isinstance(d, int) and d >= 0) for d in dims)) - self._dims = tuple(dims) - - @property - def dims(self): - return self._dims - - def implies_dims(self): - return True - - @property - def rank(self): - return len(self._dims) - - def implies_rank(self): - return True - - def __repr__(self): - return "Shape(%s)" % (", ".join(str(d) for d in self._dims)) - - -class DimFlagEnum(_LiterateEnum): - """Flag for the kind of DimFlag constraint.""" - Dynamic = 1 - - -class DimFlag(ArrayConstraint): - """Generic flag applying to one or more dimensions. - - If dims is Unspec, the flag applies to all dims. - - >>> DimFlag("Dynamic") - DimFlag(Dynamic, Unspec) - >>> DimFlag("Dynamic", 1) - DimFlag(Dynamic, (1,)) - >>> DimFlag("Dynamic", (0, 1)) - DimFlag(Dynamic, (0, 1)) - """ - __slots__ = ["_flag", "_dims"] - - def __init__(self, flag, dims=Unspec): - super().__init__() - self._flag = DimFlagEnum.parse(flag) - if isinstance(dims, int): - assert (dims >= 0) - self._dims = (dims,) - elif dims is Unspec: - self._dims = Unspec - else: - self._dims = tuple(dims) - assert (all(isinstance(d, int) and d >= 0 for d in self._dims)) - - def implies_dim_flag(self): - return False - - @property - def dim_flag(self): - return self._flag, self._dims - - def __repr__(self): - return "DimFlag(%r, %r)" % (self._flag, self._dims) - - -def DynamicDim(dims=Unspec): - """Dim flag that signals a dimension should be considered dynamic.""" - return DimFlag(DimFlagEnum.Dynamic, dims) - - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/python/npcomp/utils/__init__.py b/python/npcomp/utils/__init__.py deleted file mode 100644 index 8ba4ae08d..000000000 --- a/python/npcomp/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import test_utils as test diff --git a/python/npcomp/utils/test_utils.py b/python/npcomp/utils/test_utils.py deleted file mode 100644 index 206e264bc..000000000 --- a/python/npcomp/utils/test_utils.py +++ /dev/null @@ -1,78 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -import contextlib -import io -import os -import subprocess -import sys - -_disable_var = "NPCOMP_DISABLE_FILECHECK" -_filecheck_binary_var = "FILECHECK_BINARY" -_redirect_io = None -_redirect_context = None - - -def is_filecheck_disabled(): - return _disable_var in os.environ - - -def start_filecheck_test(): - if is_filecheck_disabled(): - print("WARNING:FileCheck disabled due to", - _disable_var, - "in the environment", - file=sys.stderr) - return - global _redirect_io - global _redirect_context - _redirect_io = io.StringIO() - _redirect_context = contextlib.redirect_stdout(_redirect_io) - _redirect_context.__enter__() - - -def end_filecheck_test(main_file): - if is_filecheck_disabled(): - return - global _redirect_io - global _redirect_context - _redirect_context.__exit__(None, None, None) - _redirect_context = None - _redirect_io.flush() - filecheck_input = _redirect_io.getvalue() - _redirect_io = None - filecheck_binary = "FileCheck" - if _filecheck_binary_var in os.environ: - filecheck_binary = os.environ[_filecheck_binary_var] - filecheck_args = [filecheck_binary, main_file, "--dump-input=fail"] - p = subprocess.Popen(filecheck_args, stdin=subprocess.PIPE) - p.communicate(filecheck_input.encode("UTF-8")) - sys.exit(p.returncode) - - -def run_under_filecheck(main_file, callback, disable_filecheck=False): - """Runs a callback under a FileCheck sub-process. - - This is typically called from a main context and will sys.exit on - completion. - - Args: - main_file: The file to process filecheck directives on. Typically - __file__ from the caller's perspective. - callback: The no-argument callback to invoke. - disable_filecheck: Whether to disable filecheck. - """ - if disable_filecheck or is_filecheck_disabled(): - print("WARNING:FileCheck disabled due to", - _disable_var, - "in the environment", - file=sys.stderr) - callback() - sys.exit(0) - - try: - start_filecheck_test() - callback() - finally: - end_filecheck_test(main_file) diff --git a/python/samples/ast_extraction.py b/python/samples/ast_extraction.py deleted file mode 100644 index c3ba763ce..000000000 --- a/python/samples/ast_extraction.py +++ /dev/null @@ -1,20 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. 
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -from npcomp.compiler.frontend import * - - -def binary_expression(): - a = 1 - b = 100 - c = a * b + 4 - c = c * 2.0 - return c - - -fe = ImportFrontend() -try: - f = fe.import_global_function(binary_expression) -finally: - print(fe.ir_module.to_asm(debug_info=True)) diff --git a/python/samples/compiled_add.py b/python/samples/compiled_add.py deleted file mode 100644 index c13abb067..000000000 --- a/python/samples/compiled_add.py +++ /dev/null @@ -1,35 +0,0 @@ -import numpy as np - -from npcomp.compiler.numpy import test_config -from npcomp.compiler.numpy.backend import refjit -from npcomp.compiler.numpy.frontend import * -from npcomp.compiler.numpy.target import * - - -def compile_function(f): - fe = ImportFrontend(config=test_config.create_test_config( - target_factory=GenericTarget32)) - fe.import_global_function(f) - compiler = refjit.CompilerBackend() - vm_blob = compiler.compile(fe.ir_module) - loaded_m = compiler.load(vm_blob) - return loaded_m[f.__name__] - - -global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0], - (2, 1))) - -a = np.asarray([1.0, 2.0], dtype=np.float32) -b = np.asarray([3.0, 4.0], dtype=np.float32) - - -@compile_function -def global_add(): - return np.add(a, np.add(b, a)) - - -assert global_add.__isnpcomp__ - -# CHECK: GLOBAL_ADD: [5. 8.] -result = global_add() -print("GLOBAL_ADD:", result) diff --git a/test/Backend/Iree/Sample/simple_invoke.py b/test/Backend/Iree/Sample/simple_invoke.py deleted file mode 100644 index 1c43b8cab..000000000 --- a/test/Backend/Iree/Sample/simple_invoke.py +++ /dev/null @@ -1,41 +0,0 @@ -# RUN: %PYTHON %s -# TODO: Numpy compiler has bitrotted. -# XFAIL: * - -from npcomp.compiler.numpy.backend import iree -from npcomp.compiler.numpy.frontend import * -from npcomp.compiler.numpy import test_config -from npcomp.compiler.numpy.target import * -from npcomp.compiler.utils import logging - -# TODO: This should all exist in a high level API somewhere. 
-from _npcomp import mlir - - -logging.enable() - - -def compile_function(f): - fe = ImportFrontend(config=test_config.create_test_config( - target_factory=GenericTarget32)) - fe.import_global_function(f) - compiler = iree.CompilerBackend() - vm_blob = compiler.compile(fe.ir_module) - loaded_m = compiler.load(vm_blob) - return loaded_m[f.__name__] - - -@compile_function -def int_add(a: int, b: int): - return a + b - -result = int_add(5, 6) -assert result == 11 - - -@compile_function -def simple_control_flow(a: int, b: int): - return (a * b) and (a - b) - -assert simple_control_flow(5, 6) == -1 -assert simple_control_flow(-1, 0) == 0 diff --git a/test/Backend/Iree/lit.local.cfg b/test/Backend/Iree/lit.local.cfg deleted file mode 100644 index 373766acc..000000000 --- a/test/Backend/Iree/lit.local.cfg +++ /dev/null @@ -1,8 +0,0 @@ -from lit.llvm import llvm_config - -if config.npcomp_enable_iree: - llvm_config.with_environment('PYTHONPATH', [ - os.path.join(config.npcomp_obj_root, "iree", "bindings", "python"), - ], append_path=True) -else: - config.unsupported = True diff --git a/test/CAPI/CMakeLists.txt b/test/CAPI/CMakeLists.txt deleted file mode 100644 index 6b67082a7..000000000 --- a/test/CAPI/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -add_npcomp_executable(npcomp-capi-ir-test ir.c) -llvm_update_compile_flags(npcomp-capi-ir-test) - -target_link_libraries(npcomp-capi-ir-test - PRIVATE - NPCOMPCAPI -) diff --git a/test/CAPI/ir.c b/test/CAPI/ir.c deleted file mode 100644 index 8bb9d0020..000000000 --- a/test/CAPI/ir.c +++ /dev/null @@ -1,80 +0,0 @@ -/*===- ir.c - Simple test of C APIs ---------------------------------------===*\ -|* *| -|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *| -|* Exceptions. *| -|* See https://llvm.org/LICENSE.txt for license information. *| -|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *| -|* *| -\*===----------------------------------------------------------------------===*/ - -/* RUN: npcomp-capi-ir-test 2>&1 | FileCheck %s - */ - -#include "mlir-c/IR.h" -#include "mlir-c/Registration.h" -#include "npcomp-c/BasicpyTypes.h" -#include "npcomp-c/NumpyTypes.h" -#include "npcomp-c/Registration.h" - -#include -#include -#include -#include -#include - -// Dumps an instance of all NPComp types. -static int printStandardTypes(MlirContext ctx) { - // Bool type. - MlirType boolType = npcompBasicpyBoolTypeGet(ctx); - if (!npcompTypeIsABasicpyBool(boolType)) - return 1; - mlirTypeDump(boolType); - fprintf(stderr, "\n"); - - // Bytes type. - MlirType bytesType = npcompBasicpyBytesTypeGet(ctx); - if (!npcompTypeIsABasicpyBytes(bytesType)) - return 1; - mlirTypeDump(bytesType); - fprintf(stderr, "\n"); - - // Any dtype. - MlirType anyDtype = npcompAnyDtypeTypeGet(ctx); - if (!npcompTypeIsANumpyAnyDtype(anyDtype)) - return 2; - mlirTypeDump(anyDtype); - fprintf(stderr, "\n"); - - // Ranked NdArray. 
- int64_t fourDim = 4; - MlirType rankedNdArray = - npcompNumpyNdArrayTypeGetRanked(1, &fourDim, boolType); - if (!npcompTypeIsANumpyNdArray(rankedNdArray)) - return 3; - mlirTypeDump(rankedNdArray); - fprintf(stderr, "\n"); - - return 0; -} - -int main() { - MlirContext ctx = mlirContextCreate(); - mlirRegisterAllDialects(ctx); - npcompRegisterAllDialects(ctx); - - // clang-format off - // CHECK-LABEL: @types - // CHECK: !basicpy.BoolType - // CHECK: !basicpy.BytesType - // CHECK: !numpy.any_dtype - // CHECK: !numpy.ndarray<[4]:!basicpy.BoolType> - // CHECK: 0 - // clang-format on - fprintf(stderr, "@types\n"); - int errcode = printStandardTypes(ctx); - fprintf(stderr, "%d\n", errcode); - - mlirContextDestroy(ctx); - - return 0; -} diff --git a/test/CAPI/lit.local.cfg b/test/CAPI/lit.local.cfg deleted file mode 100644 index f08a0de48..000000000 --- a/test/CAPI/lit.local.cfg +++ /dev/null @@ -1 +0,0 @@ -config.suffixes.add('.c') diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 8e2112cfd..9a0c99059 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,5 +1,3 @@ -add_subdirectory(CAPI) - llvm_canonicalize_cmake_booleans( NPCOMP_ENABLE_IREE ) @@ -13,7 +11,6 @@ configure_lit_site_cfg( set(NPCOMP_TEST_DEPENDS FileCheck count not - npcomp-capi-ir-test npcomp-opt refback-run NPCOMPPythonModules diff --git a/test/Dialect/Basicpy/canonicalize.mlir b/test/Dialect/Basicpy/canonicalize.mlir deleted file mode 100644 index 8643a2ecc..000000000 --- a/test/Dialect/Basicpy/canonicalize.mlir +++ /dev/null @@ -1,92 +0,0 @@ -// RUN: npcomp-opt -split-input-file -canonicalize %s | FileCheck %s - -// CHECK-LABEL: func @unknown_cast_elide -func @unknown_cast_elide(%arg0 : i32) -> i32 { - // CHECK-NOT: basicpy.unknown_cast - %0 = basicpy.unknown_cast %arg0 : i32 -> i32 - return %0 : i32 -} - -// ----- -// CHECK-LABEL: func @unknown_cast_preserve -func @unknown_cast_preserve(%arg0 : i32) -> !basicpy.UnknownType { - // CHECK: basicpy.unknown_cast - %0 = basicpy.unknown_cast %arg0 : i32 -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -// CHECK-LABEL: @numeric_constant_si32 -func @numeric_constant_si32() -> si32 { - // CHECK: %num-1_si32 = basicpy.numeric_constant -1 : si32 - %0 = basicpy.numeric_constant -1 : si32 - return %0 : si32 -} - -// ----- -// CHECK-LABEL: @numeric_constant_ui32 -func @numeric_constant_ui32() -> ui32 { - // CHECK: %num1_ui32 = basicpy.numeric_constant 1 : ui32 - %0 = basicpy.numeric_constant 1 : ui32 - return %0 : ui32 -} - -// ----- -// CHECK-LABEL: @numeric_constant_f32 -func @numeric_constant_f32() -> f32 { - // CHECK: %num = basicpy.numeric_constant 2.000000e+00 : f32 - %0 = basicpy.numeric_constant 2.0 : f32 - return %0 : f32 -} - -// ----- -// CHECK-LABEL: @numeric_constant_complex_f32 -func @numeric_constant_complex_f32() -> complex<f32> { - // CHECK: %num = basicpy.numeric_constant [2.000000e+00 : f32, 3.000000e+00 : f32] : complex<f32> - %0 = basicpy.numeric_constant [2.0 : f32, 3.0 : f32] : complex<f32> - return %0 : complex<f32> -} - -// ----- -// CHECK-LABEL: @bool_constant -func @bool_constant() -> !basicpy.BoolType { - // CHECK: %bool_true = basicpy.bool_constant true - %0 = basicpy.bool_constant true - return %0 : !basicpy.BoolType -} - -// ----- -// CHECK-LABEL: @bytes_constant -func @bytes_constant() -> !basicpy.BytesType { - // CHECK: %bytes = basicpy.bytes_constant "foobar" - %0 = basicpy.bytes_constant "foobar" - return %0 : !basicpy.BytesType -} - -// ----- -// CHECK-LABEL: @str_constant -func @str_constant() -> !basicpy.StrType { - // CHECK: 
%str = basicpy.str_constant "foobar" - %0 = basicpy.str_constant "foobar" - return %0 : !basicpy.StrType -} - -// ----- -// CHECK-LABEL: @bool_cast -func @bool_cast() -> i1 { - // CHECK: %[[CTRUE:.*]] = constant true - %0 = basicpy.bool_constant true - %1 = basicpy.bool_cast %0 : !basicpy.BoolType -> i1 - // CHECK: return %[[CTRUE]] : i1 - return %1 : i1 -} - -// ----- -// CHECK-LABEL: func @singleton_coalesce -func @singleton_coalesce() -> (!basicpy.NoneType, !basicpy.NoneType) { - %0 = basicpy.singleton : !basicpy.NoneType - %1 = basicpy.singleton : !basicpy.NoneType - // CHECK-NEXT: %[[RET:.*]] = basicpy.singleton - // CHECK-NEXT: return %[[RET]], %[[RET]] - return %0, %1 : !basicpy.NoneType, !basicpy.NoneType -} diff --git a/test/Dialect/Basicpy/functions.mlir b/test/Dialect/Basicpy/functions.mlir deleted file mode 100644 index a60bb3ba0..000000000 --- a/test/Dialect/Basicpy/functions.mlir +++ /dev/null @@ -1,81 +0,0 @@ -// RUN: npcomp-opt -split-input-file -verify-diagnostics %s | npcomp-opt -canonicalize | FileCheck --dump-input=fail %s - -//===----------------------------------------------------------------------===// -// func_template_call -//===----------------------------------------------------------------------===// - -// CHECK-LABEL: func @positional -builtin.func @positional(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // CHECK: basicpy.func_template_call @foobar(%arg0, %arg1) kw [] - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw [] : (!basicpy.UnknownType, !basicpy.UnknownType) -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -// CHECK-LABEL: func @kwValid -builtin.func @kwValid(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // CHECK: basicpy.func_template_call @foobar(%arg0, %arg1) kw ["second"] - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw ["second"] : (!basicpy.UnknownType, !basicpy.UnknownType) -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -// CHECK-LABEL: func @posArgPack -builtin.func @posArgPack(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // CHECK: basicpy.func_template_call @foobar(%arg0, %arg1) kw ["*"] - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw ["*"] : (!basicpy.UnknownType, !basicpy.UnknownType) -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -// CHECK-LABEL: func @kwArgPack -builtin.func @kwArgPack(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // CHECK: basicpy.func_template_call @foobar(%arg0, %arg1) kw ["**"] - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw ["**"] : (!basicpy.UnknownType, !basicpy.UnknownType) -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -builtin.func @kwOverflow(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // expected-error @+1 {{expected <= kw arg names vs args}} - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw ["second", "third", "fourth"] : (!basicpy.UnknownType, !basicpy.UnknownType) -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -builtin.func @badPosArgPack(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // expected-error @+1 {{positional arg pack must be the first kw arg}} - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw ["*", "*"] : (!basicpy.UnknownType, !basicpy.UnknownType) -> 
!basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -// ----- -builtin.func @badKwArgPack(%arg0 : !basicpy.UnknownType, %arg1 : !basicpy.UnknownType) -> !basicpy.UnknownType { - // expected-error @+1 {{kw arg pack must be the last kw arg}} - %0 = basicpy.func_template_call @foobar(%arg0, %arg1) kw ["**", "next"] : (!basicpy.UnknownType, !basicpy.UnknownType) -> !basicpy.UnknownType - return %0 : !basicpy.UnknownType -} - -//===----------------------------------------------------------------------===// -// func_template -//===----------------------------------------------------------------------===// - -// ----- -// CHECK-LABEL: module @valid_template -builtin.module @valid_template { - // CHECK: basicpy.func_template @__global$pkg.foobar attributes {py_bind = ["#abs"]} { - basicpy.func_template @__global$pkg.foobar attributes {py_bind = ["#abs"]} { - // CHECK: func @forInts(%arg0: i32) -> i32 - builtin.func @forInts(%arg0 : i32) -> i32 { - return %arg0 : i32 - } - } -} - -// ----- -builtin.module @invalid_template { - basicpy.func_template @__global$pkg.foobar { - // expected-error @+1 {{illegal operation in func_template}} - builtin.module {} - } -} diff --git a/test/Dialect/Basicpy/ops-invalid.mlir b/test/Dialect/Basicpy/ops-invalid.mlir deleted file mode 100644 index 416c364a1..000000000 --- a/test/Dialect/Basicpy/ops-invalid.mlir +++ /dev/null @@ -1,49 +0,0 @@ -// RUN: npcomp-opt -split-input-file -verify-diagnostics %s - -func @numeric_constant_string_attr() { - // expected-error @+1 {{op requires 'value' to be an integer constant}} - %0 = "basicpy.numeric_constant"() {value="somestring" : i32} : () -> (i32) - return -} - -// ----- -func @numeric_constant_bool() { - // expected-error @+1 {{cannot have an i1 type}} - %0 = "basicpy.numeric_constant"() {value = true} : () -> (i1) - return -} - -// ----- -func @numeric_constant_mismatch_int() { - // expected-error @+1 {{op requires 'value' to be a floating point constant}} - %0 = "basicpy.numeric_constant"() {value = 1 : i32} : () -> (f64) - return -} - -// ----- -func @numeric_constant_mismatch_float() { - // expected-error @+1 {{op requires 'value' to be an integer constant}} - %0 = "basicpy.numeric_constant"() {value = 1.0 : f32} : () -> (i32) - return -} - -// ----- -func @numeric_constant_complex_wrong_arity() { - // expected-error @+1 {{op requires 'value' to be a two element array of floating point complex number components}} - %3 = basicpy.numeric_constant [2.0 : f32] : complex<f32> - return -} - -// ----- -func @numeric_constant_complex_mismatch_type_real() { - // expected-error @+1 {{op requires 'value' to be a two element array of floating point complex number components}} - %3 = basicpy.numeric_constant [2.0 : f64, 3.0 : f32] : complex<f32> - return -} - -// ----- -func @numeric_constant_complex_mismatch_type_imag() { - // expected-error @+1 {{op requires 'value' to be a two element array of floating point complex number components}} - %3 = basicpy.numeric_constant [2.0 : f32, 3.0 : f16] : complex<f32> - return -} diff --git a/test/Dialect/Basicpy/ops.mlir b/test/Dialect/Basicpy/ops.mlir deleted file mode 100644 index c0c1c4038..000000000 --- a/test/Dialect/Basicpy/ops.mlir +++ /dev/null @@ -1,43 +0,0 @@ -// RUN: npcomp-opt -split-input-file %s | npcomp-opt | FileCheck --dump-input=fail %s - -// ----- -// CHECK-LABEL: @build_dict_generic -func @build_dict_generic() -> !basicpy.DictType { - // CHECK: basicpy.build_dict : () -> !basicpy.DictType - %0 = basicpy.build_dict : () -> !basicpy.DictType - return %0 : !basicpy.DictType -} - 
-// ----- -// CHECK-LABEL: @build_list_generic -func @build_list_generic(%arg0 : si32, %arg1 : si32) -> !basicpy.ListType { - // CHECK: basicpy.build_list %arg0, %arg1 : (si32, si32) -> !basicpy.ListType - %0 = basicpy.build_list %arg0, %arg1 : (si32, si32) -> !basicpy.ListType - return %0 : !basicpy.ListType -} - -// ----- -// CHECK-LABEL: @build_tuple_generic -func @build_tuple_generic(%arg0 : si32, %arg1 : si32) -> !basicpy.TupleType { - // CHECK: basicpy.build_tuple %arg0, %arg1 : (si32, si32) -> !basicpy.TupleType - %0 = basicpy.build_tuple %arg0, %arg1 : (si32, si32) -> !basicpy.TupleType - return %0 : !basicpy.TupleType -} - -// ----- -// CHECK-LABEL: @numeric_constant -func @numeric_constant() { - // CHECK: %num-1_si32 = basicpy.numeric_constant -1 : si32 - %0 = basicpy.numeric_constant -1 : si32 - // CHECK: %num1_ui32 = basicpy.numeric_constant 1 : ui32 - %1 = basicpy.numeric_constant 1 : ui32 - // CHECK: %num = basicpy.numeric_constant 2.000000e+00 : f32 - %2 = basicpy.numeric_constant 2.0 : f32 - // CHECK: %num_0 = basicpy.numeric_constant [2.000000e+00 : f32, 3.000000e+00 : f32] : complex<f32> - %3 = basicpy.numeric_constant [2.0 : f32, 3.0 : f32] : complex<f32> - // CHECK: %bool_true = basicpy.bool_constant true - %4 = basicpy.bool_constant true - // CHECK: %bool_false = basicpy.bool_constant false - %5 = basicpy.bool_constant false - return -} diff --git a/test/Dialect/Basicpy/slot_object.mlir b/test/Dialect/Basicpy/slot_object.mlir deleted file mode 100644 index 8836b8f6a..000000000 --- a/test/Dialect/Basicpy/slot_object.mlir +++ /dev/null @@ -1,24 +0,0 @@ -// RUN: npcomp-opt -split-input-file %s | npcomp-opt | FileCheck --dump-input=fail %s - -// CHECK-LABEL: @slot_object_make -func @slot_object_make() -> (!basicpy.SlotObject) { - // CHECK: %[[N:.+]] = basicpy.singleton - %0 = basicpy.singleton : !basicpy.NoneType - // CHECK: basicpy.slot_object_make(%[[N]], %[[N]], %[[N]]) -> !basicpy.SlotObject - %1 = "basicpy.slot_object_make"(%0, %0, %0) {className = "slice" } : - (!basicpy.NoneType, !basicpy.NoneType, !basicpy.NoneType) -> - (!basicpy.SlotObject) - return %1 : !basicpy.SlotObject -} - -// ----- -func @slot_object_get() -> (!basicpy.NoneType) { - %0 = basicpy.singleton : !basicpy.NoneType - // CHECK: %[[OBJ:.+]] = basicpy.slot_object_make - %1 = basicpy.slot_object_make(%0, %0) -> (!basicpy.SlotObject) - // CHECK: basicpy.slot_object_get %[[OBJ]][1] : !basicpy.SlotObject - %2 = basicpy.slot_object_get %1[1] : !basicpy.SlotObject - return %2 : !basicpy.NoneType -} - -// TODO: Verify illegal forms diff --git a/test/Dialect/Basicpy/types_attrs.mlir b/test/Dialect/Basicpy/types_attrs.mlir deleted file mode 100644 index 31cb59340..000000000 --- a/test/Dialect/Basicpy/types_attrs.mlir +++ /dev/null @@ -1,16 +0,0 @@ -// RUN: npcomp-opt -split-input-file %s | npcomp-opt | FileCheck --dump-input=fail %s - -// CHECK-LABEL: @const_none -func @const_none() -> (!basicpy.NoneType) { - // CHECK: basicpy.singleton : !basicpy.NoneType - %0 = basicpy.singleton : !basicpy.NoneType - return %0 : !basicpy.NoneType -} - -// ----- -// CHECK-LABEL: @const_ellipsis -func @const_ellipsis() -> (!basicpy.EllipsisType) { - // CHECK: basicpy.singleton : !basicpy.EllipsisType - %0 = basicpy.singleton : !basicpy.EllipsisType - return %0 : !basicpy.EllipsisType -} diff --git a/test/Dialect/Numpy/canonicalizers.mlir b/test/Dialect/Numpy/canonicalizers.mlir deleted file mode 100644 index 7913ae5bd..000000000 --- a/test/Dialect/Numpy/canonicalizers.mlir +++ /dev/null @@ -1,38 +0,0 @@ -// RUN: npcomp-opt 
-split-input-file %s -canonicalize | FileCheck --dump-input=fail %s - -// CHECK-LABEL: func @elideCreateRedundantArrayFromTensor -func @elideCreateRedundantArrayFromTensor() -> tensor<2xf64> { - // CHECK: %[[CST:.*]] = constant - // CHECK-NOT: numpy.create_array_from_tensor - // CHECK-NOT: numpy.copy_to_tensor - %cst = constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64> - %0 = numpy.create_array_from_tensor %cst : (tensor<2xf64>) -> !numpy.ndarray<[2]:f64> - %1 = numpy.copy_to_tensor %0 : (!numpy.ndarray<[2]:f64>) -> tensor<2xf64> - // CHECK: return %[[CST]] - return %1 : tensor<2xf64> -} - -// This test verifies that the very trivial elision is not overly aggressive. -// Note that in this example, it is still safe to remove the copy, but the -// analysis has not yet been written to do that safely. -// CHECK-LABEL: func @elideCreateRedundantArrayFromTensorNonTrivial -func @elideCreateRedundantArrayFromTensorNonTrivial() -> (tensor<2xf64>, tensor<2xf64>) { - // CHECK: numpy.create_array_from_tensor - // CHECK: numpy.copy_to_tensor - %cst = constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64> - %0 = numpy.create_array_from_tensor %cst : (tensor<2xf64>) -> !numpy.ndarray<[2]:f64> - %1 = numpy.copy_to_tensor %0 : (!numpy.ndarray<[2]:f64>) -> tensor<2xf64> - %2 = numpy.copy_to_tensor %0 : (!numpy.ndarray<[2]:f64>) -> tensor<2xf64> - return %1, %2 : tensor<2xf64>, tensor<2xf64> -} - -// CHECK-LABEL: func @commuteStaticInfoCastOpWithCreateArrayFromTensorOp( -// CHECK-SAME: %[[TENSOR:.*]]: tensor<2x3x?xf32>) -> !numpy.ndarray<*:!numpy.any_dtype> { -// CHECK: %[[ERASED_TENSOR:.*]] = numpy.tensor_static_info_cast %[[TENSOR]] : tensor<2x3x?xf32> to tensor<*x!numpy.any_dtype> -// CHECK: %[[ERASED_ARRAY:.*]] = numpy.create_array_from_tensor %[[ERASED_TENSOR]] : (tensor<*x!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> -// CHECK: return %[[ERASED_ARRAY]] : !numpy.ndarray<*:!numpy.any_dtype> -func @commuteStaticInfoCastOpWithCreateArrayFromTensorOp(%arg0: tensor<2x3x?xf32>) -> !numpy.ndarray<*:!numpy.any_dtype> { - %0 = numpy.create_array_from_tensor %arg0 : (tensor<2x3x?xf32>) -> !numpy.ndarray<[2,3,?]:f32> - %1 = numpy.static_info_cast %0 : !numpy.ndarray<[2,3,?]:f32> to !numpy.ndarray<*:!numpy.any_dtype> - return %1 : !numpy.ndarray<*:!numpy.any_dtype> -} diff --git a/test/Dialect/Numpy/ops.mlir b/test/Dialect/Numpy/ops.mlir deleted file mode 100644 index eadc0cfca..000000000 --- a/test/Dialect/Numpy/ops.mlir +++ /dev/null @@ -1,41 +0,0 @@ -// RUN: npcomp-opt -split-input-file %s | npcomp-opt | FileCheck --dump-input=fail %s - -// ----- -// CHECK-LABEL: @builtin_ufunc -func @builtin_ufunc(%arg0 : tensor<3xf64>, %arg1 : tensor<3xf64>) -> tensor<3xf64> { - %0 = numpy.builtin_ufunc_call<"numpy.add"> (%arg0, %arg1) : (tensor<3xf64>, tensor<3xf64>) -> tensor<3xf64> - return %0 : tensor<3xf64> -} - -// CHECK-LABEL: @ndarray_tensor_bridging -func @ndarray_tensor_bridging(%arg0: !numpy.ndarray<[2,3]:f32>, %arg1: !numpy.ndarray<[2,3]:f32>, %arg2: tensor<2x3xf32>) { - // CHECK-NEXT: numpy.copy_to_tensor - %t = numpy.copy_to_tensor %arg1 : (!numpy.ndarray<[2,3]:f32>) -> tensor<2x3xf32> - // CHECK-NEXT: numpy.create_array_from_tensor - %a = numpy.create_array_from_tensor %arg2 : (tensor<2x3xf32>) -> !numpy.ndarray<[2,3]:f32> - // CHECK-NEXT: numpy.overwrite_array - numpy.overwrite_array %arg2 overwrites %arg0 : tensor<2x3xf32>, !numpy.ndarray<[2,3]:f32> - return -} - -// CHECK-LABEL: @static_info_cast -func @static_info_cast(%arg0: !numpy.ndarray<[2,3]:f32>, %arg1: 
!numpy.ndarray<[?,3]:f32>, %arg2: !numpy.ndarray<*:f32>) { - // CHECK-NEXT: numpy.static_info_cast %arg0 : !numpy.ndarray<[2,3]:f32> to !numpy.ndarray<*:!numpy.any_dtype> - %0 = numpy.static_info_cast %arg0 : !numpy.ndarray<[2,3]:f32> to !numpy.ndarray<*:!numpy.any_dtype> - // CHECK-NEXT: numpy.static_info_cast %arg1 : !numpy.ndarray<[?,3]:f32> to !numpy.ndarray<[7,3]:f32> - %1 = numpy.static_info_cast %arg1 : !numpy.ndarray<[?,3]:f32> to !numpy.ndarray<[7,3]:f32> - // CHECK-NEXT: numpy.static_info_cast %arg2 : !numpy.ndarray<*:f32> to !numpy.ndarray<[?,?]:f32> - %2 = numpy.static_info_cast %arg2 : !numpy.ndarray<*:f32> to !numpy.ndarray<[?,?]:f32> - return -} - -// CHECK-LABEL: @tensor_static_info_cast -func @tensor_static_info_cast(%arg0: tensor<2x3xf32>, %arg1: tensor<?x3xf32>, %arg2: tensor<*xf32>) { - // CHECK-NEXT: numpy.tensor_static_info_cast %arg0 : tensor<2x3xf32> to tensor<*x!numpy.any_dtype> - %0 = numpy.tensor_static_info_cast %arg0 : tensor<2x3xf32> to tensor<*x!numpy.any_dtype> - // CHECK-NEXT: numpy.tensor_static_info_cast %arg1 : tensor<?x3xf32> to tensor<7x3xf32> - %1 = numpy.tensor_static_info_cast %arg1 : tensor<?x3xf32> to tensor<7x3xf32> - // CHECK-NEXT: numpy.tensor_static_info_cast %arg2 : tensor<*xf32> to tensor<?x?xf32> - %2 = numpy.tensor_static_info_cast %arg2 : tensor<*xf32> to tensor<?x?xf32> - return -} diff --git a/test/Dialect/Numpy/public_functions_to_tensor.mlir b/test/Dialect/Numpy/public_functions_to_tensor.mlir deleted file mode 100644 index 0a1702189..000000000 --- a/test/Dialect/Numpy/public_functions_to_tensor.mlir +++ /dev/null @@ -1,43 +0,0 @@ -// RUN: npcomp-opt -split-input-file %s -verify-diagnostics -allow-unregistered-dialect -numpy-public-functions-to-tensor | FileCheck --dump-input=fail %s - -// CHECK-LABEL: legalConversion -module @legalConversion { - // CHECK: @f(%arg0: tensor<3x?xf32>, %arg1: i32, %arg2: tensor<*xf32>) -> (i32, tensor<3x?xf32>, tensor<*xf32>) - func @f(%arg0: !numpy.ndarray<[3,?]:f32>, %arg1: i32, %arg2: !numpy.ndarray<*:f32>) -> - (i32, !numpy.ndarray<[3,?]:f32>, !numpy.ndarray<*:f32>) { - // CHECK: %[[CREATE0:.+]] = numpy.create_array_from_tensor %arg0 - // CHECK: %[[CREATE1:.+]] = numpy.create_array_from_tensor %arg2 - // CHECK: %[[R0:.+]] = "unfoldable_arg0"(%[[CREATE0]]) : (!numpy.ndarray<[3,?]:f32>) -> !numpy.ndarray<[3,?]:f32> - // CHECK: %[[R1:.+]] = "unfoldable_arg1"(%[[CREATE1]]) : (!numpy.ndarray<*:f32>) -> !numpy.ndarray<*:f32> - %0 = "unfoldable_arg0"(%arg0) : (!numpy.ndarray<[3,?]:f32>) -> !numpy.ndarray<[3,?]:f32> - %1 = "unfoldable_arg1"(%arg2) : (!numpy.ndarray<*:f32>) -> !numpy.ndarray<*:f32> - // CHECK: %[[COPY0:.+]] = numpy.copy_to_tensor %[[R0]] - // CHECK: %[[COPY1:.+]] = numpy.copy_to_tensor %[[R1]] - // CHECK: return %arg1, %[[COPY0]], %[[COPY1]] : i32, tensor<3x?xf32>, tensor<*xf32> - return %arg1, %0, %1 : i32, !numpy.ndarray<[3,?]:f32>, !numpy.ndarray<*:f32> - } -} - -// ----- -// CHECK-LABEL: @nonPublic -module @nonPublic { - // CHECK: @f(%arg0: !numpy.ndarray<[3,?]:f32>) -> !numpy.ndarray<[3,?]:f32> - func private @f(%arg0: !numpy.ndarray<[3,?]:f32>) -> (!numpy.ndarray<[3,?]:f32>) { - return %arg0 : !numpy.ndarray<[3,?]:f32> - } -} - -// ----- -// CHECK-LABEL: @called -module @called { - // CHECK: @f(%arg0: !numpy.ndarray<*:f32>) -> !numpy.ndarray<*:f32> - // expected-warning @+1 {{unimplemented: cannot convert}} - func @f(%arg0: !numpy.ndarray<*:f32>) -> !numpy.ndarray<*:f32> { - return %arg0 : !numpy.ndarray<*:f32> - } - - func private @caller(%arg0: !numpy.ndarray<*:f32>) -> !numpy.ndarray<*:f32> { - %0 = call @f(%arg0) : 
(!numpy.ndarray<*:f32>) -> !numpy.ndarray<*:f32> - return %0 : !numpy.ndarray<*:f32> - } -} diff --git a/test/Dialect/Numpy/types_attrs.mlir b/test/Dialect/Numpy/types_attrs.mlir deleted file mode 100644 index d834c67e7..000000000 --- a/test/Dialect/Numpy/types_attrs.mlir +++ /dev/null @@ -1,21 +0,0 @@ -// RUN: npcomp-opt -split-input-file %s | npcomp-opt | FileCheck --dump-input=fail %s - -// CHECK-LABEL: @ndarray_no_dtype -// CHECK: !numpy.ndarray<*:?> -func @ndarray_no_dtype(%arg0 : !numpy.ndarray<*:?>) -> !numpy.ndarray<*:?> { - return %arg0 : !numpy.ndarray<*:?> -} - -// ----- -// CHECK-LABEL: @ndarray_dtype -// CHECK: !numpy.ndarray<*:i32> -func @ndarray_dtype(%arg0 : !numpy.ndarray<*:i32>) -> !numpy.ndarray<*:i32> { - return %arg0 : !numpy.ndarray<*:i32> -} - -// ----- -// CHECK-LABEL: @ndarray_ranked -// CHECK: !numpy.ndarray<[1,?,3]:i32> -func @ndarray_ranked(%arg0 : !numpy.ndarray<[1,?,3]:i32>) -> !numpy.ndarray<[1,?,3]:i32> { - return %arg0 : !numpy.ndarray<[1,?,3]:i32> -} diff --git a/test/Python/Backend/RefJIT/simple_invoke_numpy.py b/test/Python/Backend/RefJIT/simple_invoke_numpy.py deleted file mode 100644 index e3f109903..000000000 --- a/test/Python/Backend/RefJIT/simple_invoke_numpy.py +++ /dev/null @@ -1,44 +0,0 @@ -# RUN: %PYTHON %s | FileCheck %s --dump-input=fail - -# TODO: Rebase this path on linalg-on-tensors or Torch dialect. -# XFAIL: * - -import numpy as np - -from npcomp.compiler.numpy.backend import refjit -from npcomp.compiler.numpy.frontend import * -from npcomp.compiler.numpy import test_config -from npcomp.compiler.numpy.target import * -from npcomp.compiler.utils import logging - -logging.enable() - - -def compile_function(f): - fe = ImportFrontend(config=test_config.create_test_config( - target_factory=GenericTarget32)) - fe.import_global_function(f) - compiler = refjit.CompilerBackend() - blob = compiler.compile(fe.ir_module) - loaded_m = compiler.load(blob) - return loaded_m[f.__name__] - - -global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0], - (2, 1))) - -a = np.asarray([1.0, 2.0], dtype=np.float32) -b = np.asarray([3.0, 4.0], dtype=np.float32) - - -@compile_function -def global_add(): - return np.add(a, np.add(b, a)) - - -# Make sure we aren't accidentally invoking the python function :) -assert global_add.__isnpcomp__ - -# CHECK: GLOBAL_ADD: [5. 8.] 
-result = global_add() -print("GLOBAL_ADD:", result) diff --git a/test/Python/Compiler/Numpy/array_basics.py b/test/Python/Compiler/Numpy/array_basics.py deleted file mode 100644 index 9b3423f17..000000000 --- a/test/Python/Compiler/Numpy/array_basics.py +++ /dev/null @@ -1,18 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -import numpy as np -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - -global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0], - (2, 1))) - - -# CHECK-LABEL: func @global_array_to_const -@import_global -def global_array_to_const(): - # CHECK: %[[CST:.*]] = constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [2.000000e+00, 4.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> - # CHECK: numpy.create_array_from_tensor %[[CST]] : (tensor<2x3xf64>) -> !numpy.ndarray<[2,3]:f64> - local_data = global_data - return local_data diff --git a/test/Python/Compiler/Numpy/binary_expressions.py b/test/Python/Compiler/Numpy/binary_expressions.py deleted file mode 100644 index bd9a94804..000000000 --- a/test/Python/Compiler/Numpy/binary_expressions.py +++ /dev/null @@ -1,94 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# Full checking for add. Others just check validity. -# CHECK-LABEL: func @add -@import_global -def add(): - # CHECK: %[[A:.*]] = constant 1 : i64 - # CHECK: %[[B:.*]] = constant 2 : i64 - a = 1 - b = 2 - # CHECK: {{.*}} = basicpy.binary_expr %[[A]] "Add" %[[B]] : (i64, i64) -> !basicpy.UnknownType - return a + b - - -# CHECK-LABEL: func @sub -@import_global -def sub(): - # CHECK: basicpy.binary_expr {{.*}} "Sub" - return 4 - 2 - - -# CHECK-LABEL: func @mult -@import_global -def mult(): - # CHECK: basicpy.binary_expr {{.*}} "Mult" - return 4 * 2 - - -# CHECK-LABEL: func @div -@import_global -def div(): - # CHECK: basicpy.binary_expr {{.*}} "Div" - return 4 / 2 - - -# CHECK-LABEL: func @floor_div -@import_global -def floor_div(): - # CHECK: basicpy.binary_expr {{.*}} "FloorDiv" - return 4 // 2 - - -# CHECK-LABEL: func @matmul -@import_global -def matmul(): - # CHECK: basicpy.binary_expr {{.*}} "MatMult" - return 4 @ 2 - - -# CHECK-LABEL: func @modulo -@import_global -def modulo(): - # CHECK: basicpy.binary_expr {{.*}} "Mod" - return 4 % 2 - - -# CHECK-LABEL: func @left_shift -@import_global -def left_shift(): - # CHECK: basicpy.binary_expr {{.*}} "LShift" - return 4 << 2 - - -# CHECK-LABEL: func @right_shift -@import_global -def right_shift(): - # CHECK: basicpy.binary_expr {{.*}} "RShift" - return 4 >> 2 - - -# CHECK-LABEL: func @bit_and -@import_global -def bit_and(): - # CHECK: basicpy.binary_expr {{.*}} "BitAnd" - return 4 & 2 - - -# CHECK-LABEL: func @bit_xor -@import_global -def bit_xor(): - # CHECK: basicpy.binary_expr {{.*}} "BitXor" - return 4 ^ 2 - - -# CHECK-LABEL: func @bit_or -@import_global -def bit_or(): - # CHECK: basicpy.binary_expr {{.*}} "BitOr" - return 4 | 2 diff --git a/test/Python/Compiler/Numpy/booleans.py b/test/Python/Compiler/Numpy/booleans.py deleted file mode 100644 index 7c76eadff..000000000 --- a/test/Python/Compiler/Numpy/booleans.py +++ /dev/null @@ -1,91 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = 
test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @logical_and -@import_global -def logical_and(): - # CHECK: %[[X:.*]] = constant 1 - # CHECK: %[[Y:.*]] = constant 0 - # CHECK: %[[Z:.*]] = constant 2 - x = 1 - y = 0 - z = 2 - # CHECK: %[[XBOOL:.*]] = basicpy.as_i1 %[[X]] - # CHECK: %[[IF0:.*]] = scf.if %[[XBOOL]] -> (!basicpy.UnknownType) { - # CHECK: %[[YBOOL:.*]] = basicpy.as_i1 %[[Y]] - # CHECK: %[[IF1:.*]] = scf.if %[[YBOOL]] -> (!basicpy.UnknownType) { - # CHECK: %[[ZCAST:.*]] = basicpy.unknown_cast %[[Z]] - # CHECK: scf.yield %[[ZCAST]] - # CHECK: } else { - # CHECK: %[[YCAST:.*]] = basicpy.unknown_cast %[[Y]] - # CHECK: scf.yield %[[YCAST]] - # CHECK: } - # CHECK: %[[IF1CAST:.*]] = basicpy.unknown_cast %[[IF1]] - # CHECK: scf.yield %[[IF1CAST]] - # CHECK: } else { - # CHECK: %[[XCAST:.*]] = basicpy.unknown_cast %[[X]] - # CHECK: scf.yield %[[XCAST]] - # CHECK: } - return x and y and z - - -# CHECK-LABEL: func @logical_or -@import_global -def logical_or(): - # CHECK: %[[X:.*]] = constant 0 - # CHECK: %[[Y:.*]] = constant 1 - # CHECK: %[[Z:.*]] = constant 2 - # CHECK: %[[XBOOL:.*]] = basicpy.as_i1 %[[X]] - # CHECK: %[[IF0:.*]] = scf.if %[[XBOOL]] -> (!basicpy.UnknownType) { - # CHECK: %[[XCAST:.*]] = basicpy.unknown_cast %[[X]] - # CHECK: scf.yield %[[XCAST]] - # CHECK: } else { - # CHECK: %[[YBOOL:.*]] = basicpy.as_i1 %[[Y]] - # CHECK: %[[IF1:.*]] = scf.if %[[YBOOL]] -> (!basicpy.UnknownType) { - # CHECK: %[[YCAST:.*]] = basicpy.unknown_cast %[[Y]] - # CHECK: scf.yield %[[YCAST]] - # CHECK: } else { - # CHECK: %[[ZCAST:.*]] = basicpy.unknown_cast %[[Z]] - # CHECK: scf.yield %[[ZCAST]] - # CHECK: } - # CHECK: %[[IF1CAST:.*]] = basicpy.unknown_cast %[[IF1]] - # CHECK: scf.yield %[[IF1CAST]] - # CHECK: } - x = 0 - y = 1 - z = 2 - return x or y or z - - -# CHECK-LABEL: func @logical_not -@import_global -def logical_not(): - # CHECK: %[[X:.*]] = constant 1 - x = 1 - # CHECK-DAG: %[[TRUE:.*]] = basicpy.bool_constant true - # CHECK-DAG: %[[FALSE:.*]] = basicpy.bool_constant false - # CHECK-DAG: %[[CONDITION:.*]] = basicpy.as_i1 %[[X]] - # CHECK-DAG: %{{.*}} = select %[[CONDITION]], %[[FALSE]], %[[TRUE]] : !basicpy.BoolType - return not x - - -# CHECK-LABEL: func @conditional -@import_global -def conditional(): - # CHECK: %[[X:.*]] = constant 1 - x = 1 - # CHECK: %[[CONDITION:.*]] = basicpy.as_i1 %[[X]] - # CHECK: %[[IF0:.*]] = scf.if %[[CONDITION]] -> (!basicpy.UnknownType) { - # CHECK: %[[TWO:.*]] = constant 2 : i64 - # CHECK: %[[TWO_CAST:.*]] = basicpy.unknown_cast %[[TWO]] - # CHECK: scf.yield %[[TWO_CAST]] - # CHECK: } else { - # CHECK: %[[THREE:.*]] = constant 3 : i64 - # CHECK: %[[THREE_CAST:.*]] = basicpy.unknown_cast %[[THREE]] - # CHECK: scf.yield %[[THREE_CAST]] - # CHECK: } - return 2 if x else 3 diff --git a/test/Python/Compiler/Numpy/comparisons.py b/test/Python/Compiler/Numpy/comparisons.py deleted file mode 100644 index 2ab9c4c58..000000000 --- a/test/Python/Compiler/Numpy/comparisons.py +++ /dev/null @@ -1,141 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @binary_lt_ -@import_global -def binary_lt_(): - # CHECK: %[[A:.*]] = constant 1 : i64 - # CHECK: %[[B:.*]] = constant 2 : i64 - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare %[[A]] "Lt" %[[B]] : i64, i64 - return x < y - - -# CHECK-LABEL: func @binary_gt_ -@import_global -def binary_gt_(): - x = 1 - y = 2 - # CHECK: 
{{.*}} = basicpy.binary_compare {{.*}} "Gt" {{.*}} : i64, i64 - return x > y - - -# CHECK-LABEL: func @binary_lte_ -@import_global -def binary_lte_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "LtE" {{.*}} : i64, i64 - return x <= y - - -# CHECK-LABEL: func @binary_gte_ -@import_global -def binary_gte_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "GtE" {{.*}} : i64, i64 - return x >= y - - -# CHECK-LABEL: func @binary_eq_ -@import_global -def binary_eq_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "Eq" {{.*}} : i64, i64 - return x == y - - -# CHECK-LABEL: func @binary_neq_ -@import_global -def binary_neq_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "NotEq" {{.*}} : i64, i64 - return x != y - - -# CHECK-LABEL: func @binary_is_ -@import_global -def binary_is_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "Is" {{.*}} : i64, i64 - return x is y - - -# CHECK-LABEL: func @binary_is_not_ -@import_global -def binary_is_not_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "IsNot" {{.*}} : i64, i64 - return x is not y - - -# CHECK-LABEL: func @binary_in_ -@import_global -def binary_in_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "In" {{.*}} : i64, i64 - return x in y - - -# CHECK-LABEL: func @binary_not_in_ -@import_global -def binary_not_in_(): - x = 1 - y = 2 - # CHECK: {{.*}} = basicpy.binary_compare {{.*}} "NotIn" {{.*}} : i64, i64 - return x not in y - - -@import_global -def short_circuit(): - # CHECK: %[[X:.*]] = constant 1 : i64 - # CHECK: %[[Y:.*]] = constant 2 : i64 - # CHECK: %[[Z:.*]] = constant 3 : i64 - # CHECK: %[[OMEGA:.*]] = constant 5 : i64 - x = 1 - y = 2 - z = 3 - omega = 5 - # CHECK: %[[FALSE:.*]] = basicpy.bool_constant false - # CHECK: %[[CMP0:.*]] = basicpy.binary_compare %[[X]] "Lt" %[[Y]] - # CHECK: %[[CMP0_CAST:.*]] = basicpy.bool_cast %[[CMP0]] : !basicpy.BoolType -> i1 - # CHECK: %[[IF0:.*]] = scf.if %[[CMP0_CAST]] -> (!basicpy.BoolType) { - # CHECK: %[[CMP1:.*]] = basicpy.binary_compare %[[Y]] "Eq" %[[Z]] - # CHECK: %[[CMP1_CAST:.*]] = basicpy.bool_cast %[[CMP1]] : !basicpy.BoolType -> i1 - # CHECK: %[[IF1:.*]] = scf.if %[[CMP1_CAST]] {{.*}} { - # CHECK: %[[CMP2:.*]] = basicpy.binary_compare %[[Z]] "GtE" %[[OMEGA]] - # CHECK: scf.yield %[[CMP2]] - # CHECK: } else { - # CHECK: scf.yield %[[FALSE]] - # CHECK: } - # CHECK: scf.yield %[[IF1]] - # CHECK: } else { - # CHECK: scf.yield %[[FALSE]] - # CHECK: } - # CHECK: %[[RESULT:.*]] = basicpy.unknown_cast %[[IF0]] - # CHECK: return %[[RESULT]] - return x < y == z >= omega - - -# CHECK-LABEL: nested_short_circuit_expression -@import_global -def nested_short_circuit_expression(): - x = 1 - y = 2 - z = 3 - # Verify that the (z + 5) gets nested into the if. 
- # CHECK: scf.if {{.*}} { - # CHECK-NEXT: constant 6 - # CHECK-NEXT: binary_expr - return x < y == (z + 6) diff --git a/test/Python/Compiler/Numpy/constants.py b/test/Python/Compiler/Numpy/constants.py deleted file mode 100644 index f58678d42..000000000 --- a/test/Python/Compiler/Numpy/constants.py +++ /dev/null @@ -1,88 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @integer_constants -@import_global -def integer_constants(): - # CHECK: %[[A:.*]] = constant 100 : i64 - a = 100 - # CHECK: %[[A_CAST:.*]] = basicpy.unknown_cast %[[A]] : i64 -> !basicpy.UnknownType - # CHECK: return %[[A_CAST]] - return a - - -# CHECK-LABEL: func @float_constants -@import_global -def float_constants(): - # CHECK: %[[A:.*]] = constant 2.200000e+00 : f64 - a = 2.2 - # CHECK: %[[A_CAST:.*]] = basicpy.unknown_cast %[[A]] : f64 -> !basicpy.UnknownType - # CHECK: return %[[A_CAST]] - return a - - -# CHECK-LABEL: func @bool_true_constant -@import_global -def bool_true_constant(): - # CHECK: %[[A:.*]] = basicpy.bool_constant true - # CHECK: basicpy.unknown_cast %[[A]] - a = True - return a - - -# CHECK-LABEL: func @bool_false_constant -@import_global -def bool_false_constant(): - # CHECK: %[[A:.*]] = basicpy.bool_constant false - # CHECK: basicpy.unknown_cast %[[A]] - a = False - return a - - -# CHECK-LABEL: func @string_constant -@import_global -def string_constant(): - # CHECK: %[[A:.*]] = basicpy.str_constant "foobar" - # CHECK: basicpy.unknown_cast %[[A]] - a = "foobar" - return a - - -# CHECK-LABEL: func @joined_string_constant -@import_global -def joined_string_constant(): - # CHECK: %[[A:.*]] = basicpy.str_constant "I am still here" - # CHECK: basicpy.unknown_cast %[[A]] - a = "I am" " still here" - return a - - -# CHECK-LABEL: func @bytes_constant -@import_global -def bytes_constant(): - # CHECK: %[[A:.*]] = basicpy.bytes_constant "foobar" - # CHECK: basicpy.unknown_cast %[[A]] - a = b"foobar" - return a - - -# CHECK-LABEL: func @ellipsis -@import_global -def ellipsis(): - # CHECK: %[[A:.*]] = basicpy.singleton : !basicpy.EllipsisType - # CHECK: basicpy.unknown_cast %[[A]] - a = ... - return a - - -# CHECK-LABEL: func @none_constant -@import_global -def none_constant(): - # CHECK: %[[A:.*]] = basicpy.singleton : !basicpy.NoneType - # CHECK: basicpy.unknown_cast %[[A]] - a = None - return a diff --git a/test/Python/Compiler/Numpy/constants32.py b/test/Python/Compiler/Numpy/constants32.py deleted file mode 100644 index 77107e752..000000000 --- a/test/Python/Compiler/Numpy/constants32.py +++ /dev/null @@ -1,25 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -# Subset of constant tests which verify against a GenericTarget32. 
- -from npcomp.compiler.numpy import test_config -from npcomp.compiler.numpy.target import * - -import_global = test_config.create_import_dump_decorator( - target_factory=GenericTarget32) - - -# CHECK-LABEL: func @integer_constants -@import_global -def integer_constants(): - # CHECK: %[[A:.*]] = constant 100 : i32 - a = 100 - return a - - -# CHECK-LABEL: func @float_constants -@import_global -def float_constants(): - # CHECK: %[[A:.*]] = constant 2.200000e+00 : f32 - a = 2.2 - return a diff --git a/test/Python/Compiler/Numpy/cpa_type_inference.py b/test/Python/Compiler/Numpy/cpa_type_inference.py deleted file mode 100644 index 0251e6304..000000000 --- a/test/Python/Compiler/Numpy/cpa_type_inference.py +++ /dev/null @@ -1,23 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file -npcomp-cpa-type-inference | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @arithmetic_expression -@import_global -def arithmetic_expression(): - return 1 + 2 - 3 * 4 - - -# CHECK-LABEL: func @arg_inference -@import_global -def arg_inference(a: int, b: int): - return a + 2 * b - - -# CHECK-LABEL: func @conditional_inference -@import_global -def conditional_inference(cond: int, a: bool, b: int): - return a if cond + 1 else not (b * 4) diff --git a/test/Python/Compiler/Numpy/ndarray_inference.py b/test/Python/Compiler/Numpy/ndarray_inference.py deleted file mode 100644 index e3754c8d7..000000000 --- a/test/Python/Compiler/Numpy/ndarray_inference.py +++ /dev/null @@ -1,26 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file -npcomp-cpa-type-inference -canonicalize | FileCheck %s --dump-input=fail - -import numpy as np -from npcomp.compiler.numpy import test_config -from npcomp.compiler.numpy.frontend import EmittedError - -import_global = test_config.create_import_dump_decorator() - -global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0], - (2, 1))) - -a = np.asarray([1.0, 2.0]) -b = np.asarray([3.0, 4.0]) - - -# Test the basic flow of invoking a ufunc call with constants captured from -# a global using explicit function syntax (np.add(a, b)). 
-# CHECK-LABEL: func @global_add -# CHECK-SAME: -> !numpy.ndarray<*:f64> -@import_global -def global_add(): - # CHECK-NOT: UnknownType - # CHECK: numpy.builtin_ufunc_call<"numpy.multiply"> ({{.*}}, {{.*}}) : (tensor<2xf64>, tensor<2xf64>) -> tensor<*xf64> - # CHECK: numpy.builtin_ufunc_call<"numpy.add"> ({{.*}}, {{.*}}) : (tensor<2xf64>, tensor<*xf64>) -> tensor<*xf64> - # CHECK-NOT: UnknownType - return np.add(a, np.multiply(a, b)) diff --git a/test/Python/Compiler/Numpy/partial_eval_getattr.py b/test/Python/Compiler/Numpy/partial_eval_getattr.py deleted file mode 100644 index c375ffdba..000000000 --- a/test/Python/Compiler/Numpy/partial_eval_getattr.py +++ /dev/null @@ -1,27 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -import collections -import math -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @module_constant -@import_global -def module_constant(): - # CHECK: constant 3.1415926535897931 : f64 - return math.pi - - -Sub = collections.namedtuple("Sub", "term") -Record = collections.namedtuple("Record", "fielda,fieldb,inner") -record = Record(5, 25, Sub(6)) - - -# CHECK-LABEL: func @namedtuple_attributes -@import_global -def namedtuple_attributes(): - # CHECK: constant 6 - # CHECK: constant 25 - return record.inner.term - record.fieldb diff --git a/test/Python/Compiler/Numpy/primitive_ops_to_std.py b/test/Python/Compiler/Numpy/primitive_ops_to_std.py deleted file mode 100644 index d3cd1c5b8..000000000 --- a/test/Python/Compiler/Numpy/primitive_ops_to_std.py +++ /dev/null @@ -1,261 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file -basicpy-type-inference -convert-basicpy-to-std | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - -################################################################################ -# Integer tests -################################################################################ - - -# CHECK-LABEL: func @int_add -@import_global -def int_add(a: int, b: int): - # CHECK: %0 = addi %arg0, %arg1 : i64 - return a + b - - -# CHECK-LABEL: func @int_sub -@import_global -def int_sub(a: int, b: int): - # CHECK: %0 = subi %arg0, %arg1 : i64 - return a - b - - -# CHECK-LABEL: func @int_mult -@import_global -def int_mult(a: int, b: int): - # CHECK: %0 = muli %arg0, %arg1 : i64 - return a * b - - -# CHECK-LABEL: func @int_floordiv -@import_global -def int_floordiv(a: int, b: int): - # CHECK: %0 = divi_signed %arg0, %arg1 : i64 - return a // b - - -# CHECK-LABEL: func @int_modulo -@import_global -def int_modulo(a: int, b: int): - # CHECK: %0 = remi_signed %arg0, %arg1 : i64 - return a % b - - -# CHECK-LABEL: func @int_left_shift -@import_global -def int_left_shift(a: int, b: int): - # CHECK: %0 = shift_left %arg0, %arg1 : i64 - return a << b - - -# CHECK-LABEL: func @int_right_shift -@import_global -def int_right_shift(a: int, b: int): - # CHECK: %0 = shift_right_signed %arg0, %arg1 : i64 - return a >> b - - -# CHECK-LABEL: func @int_and -@import_global -def int_and(a: int, b: int): - # CHECK: %0 = and %arg0, %arg1 : i64 - return a & b - - -# CHECK-LABEL: func @int_xor -@import_global -def int_xor(a: int, b: int): - # CHECK: %0 = xor %arg0, %arg1 : i64 - return a ^ b - - -# CHECK-LABEL: func @int_or -@import_global -def int_or(a: int, b: int): - # CHECK: %0 = or %arg0, %arg1 : i64 - return a | b - - 
-################################################################################ -# Floating point -################################################################################ - - -# CHECK-LABEL: func @float_add -@import_global -def float_add(a: float, b: float): - # CHECK: %0 = addf %arg0, %arg1 : f64 - return a + b - - -# CHECK-LABEL: func @float_sub -@import_global -def float_sub(a: float, b: float): - # CHECK: %0 = subf %arg0, %arg1 : f64 - return a - b - - -# CHECK-LABEL: func @float_mult -@import_global -def float_mult(a: float, b: float): - # CHECK: %0 = mulf %arg0, %arg1 : f64 - return a * b - - -# CHECK-LABEL: func @float_floordiv -@import_global -def float_floordiv(a: float, b: float): - # CHECK: %0 = divf %arg0, %arg1 : f64 - return a / b - - -################################################################################ -# Bool conversions -################################################################################ - - -# CHECK-LABEL: func @to_boolean -@import_global -def to_boolean(x: int): - # CHECK: %[[ZERO:.*]] = constant 0 : i64 - # CHECK: %[[BOOL:.*]] = cmpi ne, %arg0, %[[ZERO]] : i64 - # CHECK: select %[[BOOL]] - # Note that the not operator is just used to force a bool conversion. - return not x - - -# CHECK-LABEL: func @to_boolean_float -@import_global -def to_boolean_float(x: float): - # CHECK: %[[ZERO:.*]] = constant 0.000000e+00 : f64 - # CHECK: %[[BOOL:.*]] = cmpf one, %arg0, %[[ZERO]] : f64 - # CHECK: select %[[BOOL]] - # Note that the not operator is just used to force a bool conversion. - return not x - - -################################################################################ -# Integer comparisons -################################################################################ - - -# CHECK-LABEL: func @int_lt_ -@import_global -def int_lt_(x: int, y: int): - # CHECK: %[[CMP:.*]] = cmpi slt, %arg0, %arg1 : i64 - # CHECK: %{{.*}} = basicpy.bool_cast %[[CMP]] : i1 -> !basicpy.BoolType - return x < y - - -# CHECK-LABEL: func @int_gt_ -@import_global -def int_gt_(x: int, y: int): - # CHECK: cmpi sgt - return x > y - - -# CHECK-LABEL: func @int_lte_ -@import_global -def int_lte_(x: int, y: int): - # CHECK: cmpi sle - return x <= y - - -# CHECK-LABEL: func @int_gte_ -@import_global -def int_gte_(x: int, y: int): - # CHECK: cmpi sge - return x >= y - - -# CHECK-LABEL: func @int_eq_ -@import_global -def int_eq_(x: int, y: int): - # CHECK: cmpi eq - return x == y - - -# CHECK-LABEL: func @int_neq_ -@import_global -def int_neq_(x: int, y: int): - # CHECK: cmpi ne - return x != y - - -# CHECK-LABEL: func @int_is_ -@import_global -def int_is_(x: int, y: int): - # CHECK: cmpi eq - return x is y - - -# CHECK-LABEL: func @int_is_not_ -@import_global -def int_is_not_(x: int, y: int): - # CHECK: cmpi ne - return x is not y - - -################################################################################ -# Float comparisons -################################################################################ - - -# CHECK-LABEL: func @float_lt_ -@import_global -def float_lt_(x: float, y: float): - # CHECK: %[[CMP:.*]] = cmpf olt, %arg0, %arg1 : f64 - # CHECK: %{{.*}} = basicpy.bool_cast %[[CMP]] : i1 -> !basicpy.BoolType - return x < y - - -# CHECK-LABEL: func @float_gt_ -@import_global -def float_gt_(x: float, y: float): - # CHECK: cmpf ogt - return x > y - - -# CHECK-LABEL: func @float_lte_ -@import_global -def float_lte_(x: float, y: float): - # CHECK: cmpf ole - return x <= y - - -# CHECK-LABEL: func @float_gte_ -@import_global -def float_gte_(x: float, 
y: float): - # CHECK: cmpf oge - return x >= y - - -# CHECK-LABEL: func @float_eq_ -@import_global -def float_eq_(x: float, y: float): - # CHECK: cmpf oeq - return x == y - - -# CHECK-LABEL: func @float_neq_ -@import_global -def float_neq_(x: float, y: float): - # CHECK: cmpf one - return x != y - - -# CHECK-LABEL: func @float_is_ -@import_global -def float_is_(x: float, y: float): - # CHECK: cmpf oeq - return x is y - - -# CHECK-LABEL: func @float_is_not_ -@import_global -def float_is_not_(x: float, y: float): - # CHECK: cmpf one - return x is not y diff --git a/test/Python/Compiler/Numpy/resolve_const.py b/test/Python/Compiler/Numpy/resolve_const.py deleted file mode 100644 index edf02eca4..000000000 --- a/test/Python/Compiler/Numpy/resolve_const.py +++ /dev/null @@ -1,30 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail -"""Module docstring.""" - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - -OUTER_ONE = 1 -OUTER_STRING = "Hello" - - -# CHECK-LABEL: func @global_int -@import_global -def global_int(): - # CHECK: constant 1 : i64 - return OUTER_ONE - - -# CHECK-LABEL: func @module_string -@import_global -def module_string(): - # CHECK: basicpy.str_constant "Hello" - return OUTER_STRING - - -# CHECK-LABEL: func @builtin_debug -@import_global -def builtin_debug(): - # CHECK: basicpy.bool_constant - return __debug__ diff --git a/test/Python/Compiler/Numpy/structure.py b/test/Python/Compiler/Numpy/structure.py deleted file mode 100644 index 5664c76d8..000000000 --- a/test/Python/Compiler/Numpy/structure.py +++ /dev/null @@ -1,35 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @positional_args -# CHECK-SAME: (%arg0: !basicpy.UnknownType, %arg1: !basicpy.UnknownType) -> !basicpy.UnknownType -@import_global -def positional_args(a, b): - # CHECK: basicpy.binary_expr %arg0 "Add" %arg1 - return a + b - - -# CHECK-LABEL: func @pass_no_return -@import_global -def pass_no_return(): - # CHECK: %[[NONE:.*]] = basicpy.singleton : !basicpy.NoneType - # CHECK: %[[NONE_CAST:.*]] = basicpy.unknown_cast %[[NONE]] : !basicpy.NoneType -> !basicpy.UnknownType - # CHECK: return %[[NONE_CAST]] - # CHECK-NOT: return - pass - - -# CHECK-LABEL: func @expr_statement -@import_global -def expr_statement(): - # CHECK: basicpy.exec { - # CHECK: %[[V:.*]] = basicpy.binary_expr - # CHECK: basicpy.exec_discard %[[V]] - # CHECK: } - # CHECK: return - a = 1 - a + 2 diff --git a/test/Python/Compiler/Numpy/template_call.py b/test/Python/Compiler/Numpy/template_call.py deleted file mode 100644 index 539bf8d47..000000000 --- a/test/Python/Compiler/Numpy/template_call.py +++ /dev/null @@ -1,22 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -import math -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @call_ceil_positional -@import_global -def call_ceil_positional(n): - # CHECK: basicpy.func_template_call @__global$math.ceil(%arg0) kw [] : (!basicpy.UnknownType) -> !basicpy.UnknownType - return math.ceil(n) - - -# CHECK-LABEL: func @call_isclose_kw -@import_global -def call_isclose_kw(n): - # CHECK-DAG: %[[RTOL:.*]] = constant 2.000000e-06 - # CHECK-DAG: %[[ABSTOL:.*]] = constant 2.000000e-01 - # CHECK: 
basicpy.func_template_call @__global$math.isclose(%arg0, %[[RTOL]], %[[ABSTOL]]) kw ["rtol", "abs_tol"] : (!basicpy.UnknownType, f64, f64) -> !basicpy.UnknownType - return math.isclose(n, rtol=2e-6, abs_tol=0.2) diff --git a/test/Python/Compiler/Numpy/type_inference.py b/test/Python/Compiler/Numpy/type_inference.py deleted file mode 100644 index 0585c798d..000000000 --- a/test/Python/Compiler/Numpy/type_inference.py +++ /dev/null @@ -1,36 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file -basicpy-type-inference | FileCheck %s --dump-input=fail - -from npcomp.compiler.numpy import test_config - -import_global = test_config.create_import_dump_decorator() - - -# CHECK-LABEL: func @arithmetic_expression -# CHECK-SAME: () -> i64 -@import_global -def arithmetic_expression(): - # CHECK: basicpy.binary_expr{{.*}} : (i64, i64) -> i64 - # CHECK: basicpy.binary_expr{{.*}} : (i64, i64) -> i64 - # CHECK: basicpy.binary_expr{{.*}} : (i64, i64) -> i64 - # CHECK: basicpy.unknown_cast{{.*}} : i64 -> i64 - # CHECK: return{{.*}} : i64 - return 1 + 2 - 3 * 4 - - -# CHECK-LABEL: func @arg_inference -# CHECK-SAME: (%arg0: i64, %arg1: i64) -> i64 -@import_global -def arg_inference(a, b): - # CHECK: basicpy.binary_expr{{.*}} : (i64, i64) -> i64 - # CHECK: basicpy.binary_expr{{.*}} : (i64, i64) -> i64 - # CHECK: basicpy.unknown_cast{{.*}} : i64 -> i64 - # CHECK: return{{.*}} : i64 - return a + 2 * b - - -# CHECK-LABEL: func @conditional_inference -# CHECK-SAME: (%arg0: i64, %arg1: !basicpy.BoolType, %arg2: i64) -> !basicpy.BoolType -@import_global -def conditional_inference(cond, a, b): - # CHECK-NOT: UnknownType - return a if cond + 1 else not (b * 4) diff --git a/test/Python/Compiler/Numpy/ufunc.py b/test/Python/Compiler/Numpy/ufunc.py deleted file mode 100644 index 8e35e67e6..000000000 --- a/test/Python/Compiler/Numpy/ufunc.py +++ /dev/null @@ -1,40 +0,0 @@ -# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail - -import numpy as np -from npcomp.compiler.numpy import test_config -from npcomp.compiler.numpy.frontend import EmittedError - -import_global = test_config.create_import_dump_decorator() - -global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0], - (2, 1))) - -a = np.asarray([1.0, 2.0]) -b = np.asarray([3.0, 4.0]) - - -# Test the basic flow of invoking a ufunc call with constants captured from -# a global using explicit function syntax (np.add(a, b)). 
-# CHECK-LABEL: func @global_add -@import_global -def global_add(): - # CHECK-DAG: %[[CST_A_TENSOR:.*]] = constant dense<[1.000000e+00, 2.000000e+00]> - # CHECK-DAG: %[[CST_B_TENSOR:.*]] = constant dense<[3.000000e+00, 4.000000e+00]> - # CHECK-DAG: %[[A_ARRAY:.*]] = numpy.create_array_from_tensor %[[CST_A_TENSOR]] - # CHECK-DAG: %[[B_ARRAY:.*]] = numpy.create_array_from_tensor %[[CST_B_TENSOR]] - # CHECK-DAG: %[[A:.*]] = numpy.copy_to_tensor %[[A_ARRAY]] - # CHECK-DAG: %[[B:.*]] = numpy.copy_to_tensor %[[B_ARRAY]] - # CHECK: %[[R_TENSOR:.*]] = numpy.builtin_ufunc_call<"numpy.add"> (%[[A]], %[[B]]) : (tensor<2xf64>, tensor<2xf64>) -> tensor<*x!basicpy.UnknownType> - # CHECK: numpy.create_array_from_tensor %[[R_TENSOR]] : (tensor<*x!basicpy.UnknownType>) -> !numpy.ndarray<*:?> - return np.add(a, b) - - -@import_global(expect_error="ufunc call does not currently support keyword args" - ) -def keywords_not_supported(): - return np.add(a, b, out=b) - - -@import_global(expect_error="ufunc numpy.add expected 2 inputs but got 1") -def mismatched_arg_count(): - return np.add(a) diff --git a/test/Python/doctests.py b/test/Python/doctests.py deleted file mode 100644 index 71d98ba0c..000000000 --- a/test/Python/doctests.py +++ /dev/null @@ -1,32 +0,0 @@ -# RUN: %PYTHON %s - -import os -os.environ["NUMPY_EXPERIMENTAL_ARRAY_FUNCTION"] = "1" - -import traceback - - -def run_doctest(mod): - print("\n\nTESTING:", mod) - print("--------") - import doctest - import sys - import importlib - try: - m = importlib.import_module(mod) - except: - print("ERROR IMPORTING MODULE:", mod) - sys.exit(1) - fc, _ = doctest.testmod(m) - if fc: - sys.exit(1) - - -TEST_MODULES = ( - "npcomp.compiler.numpy.py_value_utils", - "npcomp.types", - "npcomp.exporter", -) - -for mname in TEST_MODULES: - run_doctest(mname) diff --git a/test/lit.cfg.py b/test/lit.cfg.py index f89756c6f..d91ef0c96 100644 --- a/test/lit.cfg.py +++ b/test/lit.cfg.py @@ -68,7 +68,6 @@ tool_dirs = [ tools = [ 'npcomp-opt', 'refback-run', - 'npcomp-capi-ir-test', ToolSubst('%npcomp_runtime_shlib', config.npcomp_runtime_shlib), ]