mirror of https://github.com/llvm/torch-mlir
Rework the python build to a static assembly of MLIR+NPCOMP (#251)
* Adapt to python build system updates.
* Bump llvm to 310c9496d80961188e8d8f8ad306cdf44bd7541f (includes python build updates).
* Adds refback C-API.
* Re-layers all python builds.
* Rework CI.
parent 2ecbcbf8c7
commit 2dbab50444
.github/workflows/buildAndTest.yml

@@ -2,104 +2,44 @@ name: Build and Test
on: [push, pull_request]
env:
LLVM: external/llvm-project
jobs:
build-llvm:
name: Build LLVM
runs-on: ubuntu-20.04
steps:
- name: Configure Environment
run: echo "$GITHUB_WORKSPACE/${LLVM}/install/bin" >> $GITHUB_PATH
- name: Get npcomp
uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install python depends
run: |
python3 -m pip install pytest pybind11 numpy
- name: Get LLVM Hash
id: get-llvm-hash
run: echo "::set-output name=hash::$(git submodule status)"
shell: bash
- name: Get workflow spec hash
id: get-workflow-hash
run: echo "::set-output name=hash::$(md5sum $GITHUB_WORKSPACE/.github/workflows/buildAndTest.yml)"
shell: bash
- name: Cache LLVM
id: cache-llvm
uses: actions/cache@v2
with:
path: ${{ env.LLVM }}
key: ${{ runner.os }}-llvm-20.04-install-${{ steps.get-llvm-hash.outputs.hash }}-${{ steps.get-workflow-hash.outputs.hash }}
- name: Rebuild and Install LLVM
if: steps.cache-llvm.outputs.cache-hit != 'true'
run: |
mkdir -p ${LLVM}/build
mkdir -p ${LLVM}/install
cd ${LLVM}/build
cmake ../llvm -DLLVM_BUILD_EXAMPLES=OFF -DLLVM_TARGETS_TO_BUILD="host" -DCMAKE_INSTALL_PREFIX=../install -DLLVM_ENABLE_PROJECTS='mlir' -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_ENABLE_OCAMLDOC=OFF -DLLVM_ENABLE_BINDINGS=OFF -DLLVM_INSTALL_UTILS=ON -DLLVM_BUILD_TOOLS=ON -DLLVM_INCLUDE_TESTS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_LINK_LLVM_DYLIB=ON -DMLIR_ENABLE_BINDINGS_PYTHON=ON
cmake --build . --target install -- -j$(nproc)
build:
name: Build and Test
needs: build-llvm
name: Build and Test (Release Asserts)
runs-on: ubuntu-20.04
steps:
- name: Configure Environment
run: echo "$GITHUB_WORKSPACE/${LLVM}/install/bin" >> $GITHUB_PATH
- name: Get npcomp
uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install python depends
run: |
python3 -m pip install pytest pybind11 numpy
python3 -m pip install -r $GITHUB_WORKSPACE/external/llvm-project/mlir/python/requirements.txt
- name: Install pytorch_nightly depends
run: |
python3 -m pip install --pre 'torch==1.10.0.dev20210630+cpu' 'torchvision==0.11.0.dev20210630+cpu' -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
- name: Install IREE snapshot depends
run: |
python3 -m pip install iree-compiler-snapshot iree-runtime-snapshot -f https://github.com/google/iree/releases
- name: Get LLVM Hash
id: get-llvm-hash
run: echo "::set-output name=hash::$(git submodule status)"
- name: Install Ninja
uses: llvm/actions/install-ninja@55d844821959226fab4911f96f37071c1d4c3268
- name: Get Submodule Hash
id: get-submodule-hash
run: echo "::set-output name=hash::$(md5sum $(git submodule status))"
shell: bash
- name: Get workflow spec hash
id: get-workflow-hash
run: echo "::set-output name=hash::$(md5sum $GITHUB_WORKSPACE/.github/workflows/buildAndTest.yml)"
shell: bash
- name: Cache LLVM
id: cache-llvm
uses: actions/cache@v2
- name: Ccache for C++ compilation
uses: hendrikmuhs/ccache-action@4687d037e4d7cf725512d9b819137a3af34d39b3
with:
path: ${{ env.LLVM }}
key: ${{ runner.os }}-llvm-20.04-install-${{ steps.get-llvm-hash.outputs.hash }}-${{ steps.get-workflow-hash.outputs.hash }}
- name: Rebuild and Install LLVM
if: steps.cache-llvm.outputs.cache-hit != 'true'
run: |
mkdir ${LLVM}/build
mkdir ${LLVM}/install
cd ${LLVM}/build
cmake ../llvm -DLLVM_BUILD_EXAMPLES=OFF -DLLVM_TARGETS_TO_BUILD="host" -DCMAKE_INSTALL_PREFIX=../install -DLLVM_ENABLE_PROJECTS='mlir' -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_ENABLE_OCAMLDOC=OFF -DLLVM_ENABLE_BINDINGS=OFF -DLLVM_INSTALL_UTILS=ON -DLLVM_BUILD_TOOLS=ON -DLLVM_INCLUDE_TESTS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_LINK_LLVM_DYLIB=ON -DMLIR_ENABLE_BINDINGS_PYTHON=ON
cmake --build . --target install -- -j$(nproc)
key: ${{ runner.os }}-${{ steps.get-submodule-hash.outputs.hash }}
- name: Build and Test npcomp (Assert)
run: |
mkdir build_assert
cd build_assert
cmake .. -DCMAKE_BUILD_TYPE=Debug -DLLVM_ENABLE_ASSERTIONS=ON -DMLIR_DIR=../${LLVM}/install/lib/cmake/mlir/ -DLLVM_DIR=../${LLVM}/install/lib/cmake/llvm/ -DCMAKE_LINKER=lld -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DPYTHON_EXECUTABLE=/usr/bin/python3 -DLLVM_EXTERNAL_LIT=`pwd`/../${LLVM}/build/bin/llvm-lit
make check-npcomp check-frontends-pytorch -j$(nproc)
- name: Build and Test npcomp (Release)
run: |
mkdir build_release
cd build_release
cmake .. -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=OFF -DMLIR_DIR=../${LLVM}/install/lib/cmake/mlir/ -DLLVM_DIR=../${LLVM}/install/lib/cmake/llvm/ -DCMAKE_LINKER=lld -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DPYTHON_EXECUTABLE=/usr/bin/python3 -DLLVM_EXTERNAL_LIT=`pwd`/../${LLVM}/build/bin/llvm-lit
make check-npcomp check-frontends-pytorch -j$(nproc)
- name: Set up .env file.
run: |
echo "PYTHONPATH=\"$(realpath build_release/python):$(realpath ${LLVM}/install/python)\"" >.env
- name: Run RefBackend E2E Tests
run: |
tools/torchscript_e2e_test.sh --config=refbackend
- name: Run IREE E2E Tests
run: |
tools/torchscript_e2e_test.sh --config=iree
cd $GITHUB_WORKSPACE
mkdir build
cd build
cmake .. -GNinja \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_LINKER=lld \
-DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
-DPython3_EXECUTABLE=/usr/bin/python3 \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_TARGETS_TO_BUILD=host \
-DNPCOMP_ENABLE_PYTORCH=ON
ninja
ninja check-npcomp check-frontends-pytorch
149 CMakeLists.txt

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.12)
cmake_minimum_required(VERSION 3.13.4)
if(POLICY CMP0068)
cmake_policy(SET CMP0068 NEW)

@@ -16,12 +16,11 @@ endif()
#-------------------------------------------------------------------------------
# Options and settings
#-------------------------------------------------------------------------------
set(NPCOMP_MINIMUM_PYTHON_VERSION 3.6)
option(NPCOMP_ENABLE_IREE "Enables the IREE backend (must configure location via IREE_DIR)." OFF)
option(NPCOMP_ENABLE_REFJIT "Enables the reference JIT backend." ON)
option(NPCOMP_BUILD_NPCOMP_DYLIB "Enables shared build of NPCOMP dylib (depends on LLVM/MLIR dylib support)" ON)
set(NPCOMP_IREE_BUILDDIR "../iree-build" CACHE STRING "If building IREE, then setting this elects to build from a source directory (versus installed package)")
set(NPCOMP_ENABLE_PYTORCH "OPTIONAL" CACHE STRING "Enables the PyTorch frontend (OFF, OPTIONAL, REQUIRED)")
option(NPCOMP_ENABLE_PYTORCH "Enables PyTorch integration." OFF)
# Turn on -gsplit-dwarf if requested in debug builds.
if (NPCOMP_USE_SPLIT_DWARF AND

@@ -48,7 +47,8 @@ endif()
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
# If we are not building as a part of LLVM, build NPCOMP as a standalone
# project, using LLVM as an external library.
# project, including LLVM as a subdirectory. This gives us the most control
# and is used for standalone releases.
#-------------------------------------------------------------------------------
# Project setup and globals

@@ -59,16 +59,84 @@ if( CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR )
set(CMAKE_CXX_STANDARD 14)
#-------------------------------------------------------------------------------
# MLIR/LLVM Configuration
# Default and required options.
#-------------------------------------------------------------------------------
find_package(MLIR REQUIRED CONFIG)
message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
# CMake library generation settings.
set(BUILD_SHARED_LIBS OFF CACHE BOOL "We are actually building a static mondo-lib")
set(CMAKE_PLATFORM_NO_VERSIONED_SONAME ON CACHE BOOL "Python soname linked libraries are bad")
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON CACHE BOOL "Hide inlines")
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
#set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
# Improved LLVM defaults for a standalone build.
set(LLVM_ENABLE_PROJECTS mlir CACHE STRING "LLVM projects")
set(LLVM_ENABLE_Z3_SOLVER OFF CACHE BOOL "Disable Z3")
set(LLVM_ENABLE_ZLIB OFF CACHE BOOL "Disable ZLIB")
set(LLVM_TARGETS_TO_BUILD "host" CACHE STRING "Only build for the host")
set(LLVM_INCLUDE_EXAMPLES OFF CACHE BOOL "Disable examples")
# TODO: MLIR is a "tool"
set(LLVM_INCLUDE_TOOLS ON CACHE BOOL "Disable tools")
set(LLVM_INCLUDE_TESTS ON CACHE BOOL "Disable tests")
set(MLIR_BINDINGS_PYTHON_LOCK_VERSION ON CACHE BOOL "Link against libpython for development (should be disabled for production)")
# Required LLVM settings.
set(MLIR_ENABLE_BINDINGS_PYTHON ON CACHE BOOL "Enable MLIR python bindings" FORCE)
#-------------------------------------------------------------------------------
# MLIR/LLVM Build Setup
# TODO: It would be nice to have a better setup than this for sub including
# MLIR.
#-------------------------------------------------------------------------------
if(NOT LLVM_MAIN_SRC_DIR)
set(LLVM_MAIN_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/llvm-project/llvm")
endif()
if(NOT MLIR_MAIN_SRC_DIR)
set(MLIR_MAIN_SRC_DIR "${LLVM_MAIN_SRC_DIR}/../mlir")
endif()
set(LLVM_MAIN_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/llvm")
set(MLIR_MAIN_BINARY_DIR "${LLVM_MAIN_BINARY_DIR}/tools/mlir")
set(LLVM_INCLUDE_DIR "${LLVM_MAIN_SRC_DIR}/include")
set(LLVM_GENERATED_INCLUDE_DIR "${LLVM_MAIN_BINARY_DIR}/include")
set(MLIR_INCLUDE_DIR "${MLIR_MAIN_SRC_DIR}/include")
set(MLIR_GENERATED_INCLUDE_DIR "${MLIR_MAIN_BINARY_DIR}/include")
set(MLIR_TABLEGEN_EXE "$<TARGET_FILE:mlir-tblgen>")
include_directories(SYSTEM "${LLVM_INCLUDE_DIR}")
include_directories(SYSTEM "${LLVM_GENERATED_INCLUDE_DIR}")
include_directories(SYSTEM "${MLIR_INCLUDE_DIR}")
include_directories(SYSTEM "${MLIR_GENERATED_INCLUDE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${MLIR_MAIN_SRC_DIR}/cmake/modules")
list(APPEND CMAKE_MODULE_PATH "${LLVM_MAIN_SRC_DIR}/cmake/modules")
# Pre-configure the Python environment using the MLIR macros so that they
# are in scope and subsequent include of LLVM will match them.
include(MLIRDetectPythonEnv)
find_package(Python3 ${NPCOMP_MINIMUM_PYTHON_VERSION}
COMPONENTS Interpreter Development NumPy REQUIRED)
message(STATUS "Found python include dirs: ${Python3_INCLUDE_DIRS}")
message(STATUS "Found python libraries: ${Python3_LIBRARIES}")
mlir_detect_pybind11_install()
find_package(pybind11 2.6 CONFIG REQUIRED)
message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIR}")
message(STATUS "Python prefix = '${PYTHON_MODULE_PREFIX}', "
"suffix = '${PYTHON_MODULE_SUFFIX}', "
"extension = '${PYTHON_MODULE_EXTENSION}")
# LLVM configuration.
message(STATUS "*** ADDING LLVM ***")
add_subdirectory(
"${CMAKE_CURRENT_SOURCE_DIR}/external/llvm-project/llvm"
"${LLVM_MAIN_BINARY_DIR}"
EXCLUDE_FROM_ALL)
message(STATUS "*** LLVM DONE ***")
set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)
set(LLVM_EXTERNAL_LIT "${LLVM_MAIN_BINARY_DIR}/bin/llvm-lit")
set(LLVM_TOOLS_DIR "${LLVM_MAIN_BINARY_DIR}/bin")
# Define the default arguments to use with 'lit', and an option for the user to
# override.
set(LIT_ARGS_DEFAULT "-sv")

@@ -77,33 +145,29 @@ if( CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR )
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
include(TableGen)
include(AddLLVM)
include(AddMLIR)
include(AddMLIRPython)
include(HandleLLVMOptions)
set(NPCOMP_BUILT_STANDALONE 1)
set(BACKEND_PACKAGE_STRING "LLVM ${LLVM_PACKAGE_VERSION}")
else()
# TODO: RE-ENABLE EXTERNAL UNIFIED BUILD
message(FATAL_ERROR "External project builds of npcomp are currently not available")
# Otherwise, we are building as a part of LLVM, and we need to set up some
# variables and includes.
# set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root
# set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir
# set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include)
# set(MLIR_TABLEGEN_EXE $<TARGET_FILE:mlir-tblgen>)
# include_directories(SYSTEM ${MLIR_INCLUDE_DIR})
# include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR})
set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root
set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir
set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include)
set(MLIR_TABLEGEN_EXE $<TARGET_FILE:mlir-tblgen>)
include_directories(SYSTEM ${MLIR_INCLUDE_DIR})
include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR})
set(BACKEND_PACKAGE_STRING "${PACKAGE_STRING}")
# set(BACKEND_PACKAGE_STRING "${PACKAGE_STRING}")
endif()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
include(AddNPCOMP)
include(NpcompDetectPythonEnv)
include(ConfigurePyTorch)
include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${MLIR_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)

@@ -157,40 +221,13 @@ if(NPCOMP_ENABLE_IREE)
symlink_iree(bindings/python/pyiree/rt python/pyiree/rt)
endif()
#-------------------------------------------------------------------------------
# Python Configuration
#-------------------------------------------------------------------------------
set(NPCOMP_PYTHON_BINDINGS_VERSION_LOCKED 1 CACHE BOOL
"Links to specific python libraries, resolving all symbols.")
find_package(Python3 COMPONENTS Interpreter Development REQUIRED)
message(STATUS "Found python include dirs: ${Python3_INCLUDE_DIRS}")
message(STATUS "Found python libraries: ${Python3_LIBRARIES}")
#-------------------------------------------------------------------------------
# Pytorch Configuration
#-------------------------------------------------------------------------------
NpcompFindPyTorch(${NPCOMP_ENABLE_PYTORCH})
#-------------------------------------------------------------------------------
# Pybind11 Configuration
#-------------------------------------------------------------------------------
npcomp_detect_pybind11_install()
find_package(pybind11 2.6 CONFIG REQUIRED)
set(pybind11_INCLUDE_DIR ${pybind11_INCLUDE_DIR})
message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIR}")
message(STATUS "Python prefix = '${PYTHON_MODULE_PREFIX}', "
"suffix = '${PYTHON_MODULE_SUFFIX}', "
"extension = '${PYTHON_MODULE_EXTENSION}")
#-------------------------------------------------------------------------------
# Directory setup
#-------------------------------------------------------------------------------
set(MLIR_NPCOMP_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(MLIR_NPCOMP_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(MLIR_NPCOMP_PYTHON_PACKAGES_DIR ${CMAKE_CURRENT_BINARY_DIR}/python_packages)
add_custom_target(check-npcomp)
add_custom_target(check-npcomp-all)

@@ -200,13 +237,9 @@ add_subdirectory(include/npcomp)
add_subdirectory(lib)
add_subdirectory(python)
add_subdirectory(test)
# Tools needs to come late to ensure that NPCOMP_ALL_LIBS is populated.
# Generally things after this point may depend on NPCOMP_ALL_LIBS or libNPCOMP.so.
add_subdirectory(tools)
if(${TORCH_FOUND})
if(NPCOMP_ENABLE_PYTORCH)
message(STATUS "Adding PyTorch frontent support...")
add_subdirectory(frontends/pytorch)
else()
message("Skipping pytorch frontend, because PyTorch not found!")
endif()
24 README.md

@@ -101,16 +101,18 @@ LLVM_VERSION=10
export CC=clang-$LLVM_VERSION
export CXX=clang++-$LLVM_VERSION
export LDFLAGS=-fuse-ld=$(which ld.lld-$LLVM_VERSION)
# Build and install LLVM/MLIR into the ./install-mlir directory
./build_tools/install_mlir.sh
```
### Vanilla - numpy-only, no pytorch
```shell
# Follow common prep above.
./build_tools/cmake_configure.sh
# Install PyTorch. We currently track and require the nighly build.
# If a usable PyTorch package is installed, the default cmake settings will
# enable the PyTorch frontend.
pip3 install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
# Configure npcomp.
cmake -GNinja -Bbuild -DCMAKE_BUILD_TYPE=Release .
# Build and run tests
# ./build_tools/test_all.sh runs all of these commands.

@@ -123,14 +125,11 @@ ninja check-npcomp
source .env
```
### PyTorch Frontend
### With PyTorch integration
```shell
# Install PyTorch. We currently track and require the nighly build.
pip3 install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
# Build/test npcomp.
./build_tools/cmake_configure.sh
cmake --build build --target check-npcomp check-frontends-pytorch
cmake -DNPCOMP_ENABLE_PYTORCH=ON ...
ninja check-frontends-pytorch # If building with PyTorch
```
### PyTorch Frontend (via docker container)

@@ -159,8 +158,7 @@ Build/test npcomp (from within docker image):
```shell
# From within the docker image.
cd /src/mlir-npcomp
./build_tools/install_mlir.sh
./build_tools/cmake_configure.sh
cmake -GNinja -Bbuild -DCMAKE_BUILD_TYPE=Release -DNPCOMP_ENABLE_PYTORCH=ON .
cmake --build /build/npcomp --target check-npcomp check-frontends-pytorch
```
@@ -1,114 +0,0 @@
#!/bin/bash
# Configures the project with default options.
# LLVM/MLIR should be installed into the build directory first by running
# ./build_tools/install_mlir.sh.
#
# Usage (for in-tree build/ directory):
# ./build_tools/cmake_configure.sh [ARGS...]
# For arbitrary build/install directories, set the env variables:
# - NPCOMP_BUILD_DIR
# - LLVM_BUILD_DIR
# - LLVM_INSTALL_DIR
set -e

portable_realpath() {
# Create the directory if needed so that the `cd` doesn't fail.
mkdir -p $1 && cd $1 && pwd
}

# Setup directories.
td="$(portable_realpath $(dirname $0)/..)"
build_dir="$(portable_realpath "${NPCOMP_BUILD_DIR:-$td/build}")"
build_mlir="${LLVM_BUILD_DIR-$build_dir/build-mlir}"
install_mlir="${LLVM_INSTALL_DIR-$build_dir/install-mlir}"
declare -a extra_opts

if ! [ -d "$install_mlir/include/mlir" ]; then
echo "MLIR install path does not appear valid: $install_mlir"
exit 1
fi
mkdir -p "$build_dir"

# Make sure we are using python3.
function probe_python() {
local python_exe="$1"
local found
local command
command="import sys
if sys.version_info.major >= 3: print(sys.executable)"
set +e
found="$("$python_exe" -c "$command")"
if ! [ -z "$found" ]; then
echo "$found"
fi
}

python_exe=""
for python_candidate in python3 python; do
python_exe="$(probe_python "$python_candidate")"
if ! [ -z "$python_exe" ]; then
break
fi
done

echo "Using python: $python_exe"
if [ -z "$python_exe" ]; then
echo "Could not find python3"
exit 1
fi

# Detect windows.
if (which cygpath 2>/dev/null); then
echo "Using windows path mangling and flags"
DEBUG_FLAGS=""
function translate_path() {
cygpath --windows "$1"
}
else
DEBUG_FLAGS="-g3 -gdwarf-2"
function translate_path() {
echo "$1"
}
fi

# Find llvm-lit.
LLVM_LIT=""
for candidate_lit in "$build_mlir/bin/llvm-lit" "$build_mlir/bin/llvm-lit.py"
do
if [ -f "$candidate_lit" ]; then
LLVM_LIT="$candidate_lit"
break
fi
done

if [ -z "$LLVM_LIT" ]; then
echo "WARNING: Unable to find llvm-lit"
fi
echo "Using llvm-lit: $LLVM_LIT"

# Write a .env file for python tooling.
function write_env_file() {
echo "Updating $build_dir/.env file"
echo "PYTHONPATH=\"$(portable_realpath "$build_dir/python"):$(portable_realpath "$install_mlir/python")\"" > "$build_dir/.env"
echo "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1" >> "$build_dir/.env"
if ! cp "$build_dir/.env" "$td/.env"; then
echo "WARNING: Failed to write $td/.env"
fi
}
write_env_file

set -x
cmake -GNinja \
"-H$td" \
"-B$build_dir" \
"-DCMAKE_BUILD_TYPE=Debug" \
"-DNPCOMP_USE_SPLIT_DWARF=ON" \
"-DCMAKE_CXX_FLAGS_DEBUG=$DEBUG_FLAGS" \
"-DPYTHON_EXECUTABLE=$python_exe" \
"-DPython3_EXECUTABLE=$python_exe" \
"-DMLIR_DIR=$install_mlir/lib/cmake/mlir" \
"-DLLVM_EXTERNAL_LIT=$LLVM_LIT" \
"-DLLVM_ENABLE_WARNINGS=ON" \
"-DCMAKE_EXPORT_COMPILE_COMMANDS=TRUE" \
"${extra_opts[@]}" \
"$@"
@@ -1,81 +0,0 @@
#!/bin/bash
# Usage (for in-tree build/ directory):
# ./build_tools/install_mlir.sh
# For arbitrary build/install directories, set the env variables:
# - NPCOMP_BUILD_DIR
# - LLVM_BUILD_DIR
# - LLVM_INSTALL_DIR
set -e

portable_realpath() {
# Create the directory if needed so that the `cd` doesn't fail.
mkdir -p $1 && cd $1 && pwd
}

td="$(portable_realpath $(dirname $0)/..)"
build_dir="$(portable_realpath "${NPCOMP_BUILD_DIR:-$td/build}")"
build_mlir="${LLVM_BUILD_DIR-$build_dir/build-mlir}"
install_mlir="${LLVM_INSTALL_DIR-$build_dir/install-mlir}"

# Find LLVM source (assumes it is adjacent to this directory).
LLVM_SRC_DIR="$(portable_realpath "${LLVM_SRC_DIR:-$td/external/llvm-project}")"

if ! [ -f "$LLVM_SRC_DIR/llvm/CMakeLists.txt" ]; then
echo "Expected LLVM_SRC_DIR variable to be set correctly (got '$LLVM_SRC_DIR')"
exit 1
fi
echo "Using LLVM source dir: $LLVM_SRC_DIR"
# Setup directories.
echo "Building MLIR in $build_mlir"
echo "Install MLIR to $install_mlir"
mkdir -p "$build_mlir"
mkdir -p "$install_mlir"

echo "Beginning build (commands will echo)"
set -x

function probe_python() {
local python_exe="$1"
local found
local command
command="import sys
if sys.version_info.major >= 3: print(sys.executable)"
found="$("$python_exe" -c "$command")"
if ! [ -z "$found" ]; then
echo "$found"
fi
}

python_exe=""
for python_candidate in python3 python; do
python_exe="$(probe_python "$python_candidate")"
if ! [ -z "$python_exe" ]; then
break
fi
done

echo "Using python: $python_exe"
if [ -z "$python_exe" ]; then
echo "Could not find python3"
exit 1
fi

cmake -GNinja \
"-H$LLVM_SRC_DIR/llvm" \
"-B$build_mlir" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=TRUE \
"-DPython3_EXECUTABLE=$python_exe" \
-DLLVM_BUILD_LLVM_DYLIB=ON \
-DLLVM_LINK_LLVM_DYLIB=ON \
-DLLVM_INSTALL_UTILS=ON \
-DLLVM_ENABLE_PROJECTS=mlir \
-DLLVM_TARGETS_TO_BUILD="X86;AArch64" \
-DLLVM_INCLUDE_TOOLS=ON \
"-DCMAKE_INSTALL_PREFIX=$install_mlir" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DLLVM_USE_SPLIT_DWARF=ON \
-DLLVM_ENABLE_ASSERTIONS=On \
-DMLIR_ENABLE_BINDINGS_PYTHON=ON \
"$@"

cmake --build "$build_mlir" --target install
@@ -1 +1 @@
Subproject commit a085c23aa3c8f91866d7f4588d4f683407dc775d
Subproject commit 310c9496d80961188e8d8f8ad306cdf44bd7541f
@@ -1,3 +1,45 @@
#-------------------------------------------------------------------------------
# Sub project setup
#-------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.13.4)
if(POLICY CMP0068)
cmake_policy(SET CMP0068 NEW)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
endif()
if(POLICY CMP0075)
cmake_policy(SET CMP0075 NEW)
endif()
if(POLICY CMP0077)
cmake_policy(SET CMP0077 NEW)
endif()
project(npcomp_pytorch LANGUAGES CXX C)
set(CMAKE_C_STANDARD 11)
set(CMAKE_CXX_STANDARD 14)
#-------------------------------------------------------------------------------
# Setup PyTorch
#-------------------------------------------------------------------------------
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
include(NpcompPyTorch)
NpcompProbeForPyTorchInstall()
find_package(Torch 1.8 REQUIRED)
NpcompConfigurePyTorch()
#-------------------------------------------------------------------------------
# Output paths
#-------------------------------------------------------------------------------
if(NOT MLIR_NPCOMP_PYTHON_PACKAGES_DIR)
set(MLIR_NPCOMP_PYTHON_PACKAGES_DIR "${CMAKE_CURRENT_BINARY_DIR}/python_packages")
endif()
#-------------------------------------------------------------------------------
# Subdirectories
#-------------------------------------------------------------------------------
@@ -1,23 +1,3 @@
# NpcompFindPyTorch
# Calls find_package on Torch and does any needed post-processing.
# The enable_pytorch flag can be OFF, ON or OPTIONAL.
macro(NpcompFindPyTorch enable_pytorch)
if(${enable_pytorch} OR ${enable_pytorch} STREQUAL "OPTIONAL")
NpcompProbeForPyTorchInstall()
if(${enable_pytorch} STREQUAL "OPTIONAL")
find_package(Torch 1.8)
else()
find_package(Torch 1.8 REQUIRED)
endif()
if(${TORCH_FOUND})
NpcompConfigurePyTorch()
endif()
else()
message(STATUS "Not configuring PyTorch (disabled)")
endif()
endmacro()
# NpcompProbeForPyTorchInstall
# Attempts to find a Torch installation and set the Torch_ROOT variable
# based on introspecting the python environment. This allows a subsequent

@@ -26,15 +6,15 @@ function(NpcompProbeForPyTorchInstall)
if(Torch_ROOT)
message(STATUS "Using cached Torch root = ${Torch_ROOT}")
else()
message(STATUS "Checking for PyTorch using ${PYTHON_EXECUTABLE} ...")
message(STATUS "Checking for PyTorch using ${Python3_EXECUTABLE} ...")
execute_process(
COMMAND ${PYTHON_EXECUTABLE}
COMMAND ${Python3_EXECUTABLE}
-c "import os;import torch;print(torch.utils.cmake_prefix_path, end='')"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
RESULT_VARIABLE PYTORCH_STATUS
OUTPUT_VARIABLE PYTORCH_PACKAGE_DIR)
if(NOT PYTORCH_STATUS EQUAL "0")
message(STATUS "Unable to 'import torch' with ${PYTHON_EXECUTABLE} (fallback to explicit config)")
message(STATUS "Unable to 'import torch' with ${Python3_EXECUTABLE} (fallback to explicit config)")
return()
endif()
message(STATUS "Found PyTorch installation at ${PYTORCH_PACKAGE_DIR}")

@@ -59,7 +39,7 @@ function(NpcompConfigurePyTorch)
# Linux specific libstdcpp ABI checking.
message(STATUS "Checking if Torch is an official binary ...")
execute_process(
COMMAND ${PYTHON_EXECUTABLE}
COMMAND ${Python3_EXECUTABLE}
-c "from torch.utils import cpp_extension as c; import sys; sys.exit(0 if c._is_binary_build() else 1)"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
RESULT_VARIABLE _is_binary_build)
@@ -1,3 +1,5 @@
include(NpcompPython)
# Sharp edge: Torch extensions need to use the same pybind11 that torch
# was compiled with, or else there will be issues in cross module exception
# handling (which will abort instead of raise). We circumvent the possibility

@@ -26,24 +28,15 @@ add_library(NPCOMPTorchMLIRExt SHARED
)
target_link_libraries(NPCOMPTorchMLIRExt
# NPCOMP shared library.
# TODO: Debug why order matters here (if NPCOMP is included last a large
# amount of LLVM/MLIR/NPCOMP ends up compiled into this library).
NPCOMP
NPCOMPPythonCAPI
${TORCH_LIBRARIES}
${Python3_LIBRARIES}
torch_python
)
add_dependencies(NPCOMPTorchMLIRExt
# Uses of the torch_mlir extension also require the npcomp extension to
# be built.
NPCOMPNativePyExt
)
message(STATUS "TORCH_CXXFLAGS=${TORCH_CXXFLAGS}")
set_target_properties(NPCOMPTorchMLIRExt PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/python
LIBRARY_OUTPUT_DIRECTORY "${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_torch"
OUTPUT_NAME _torch_mlir
PREFIX "${PYTHON_MODULE_PREFIX}"
SUFFIX "${PYTHON_MODULE_EXTENSION}"
@@ -12,7 +12,6 @@
#include "mlir-c/BuiltinAttributes.h"
#include "mlir-c/BuiltinTypes.h"
#include "npcomp-c/TorchTypes.h"
#include "npcomp/Python/PybindUtils.h"
#include <ATen/core/function_schema.h>
#include <ATen/core/ivalue.h>

@@ -107,8 +106,7 @@ void AcapController::contextExit(py::object exc_type, py::object exc_val,
py::object exc_tb) {
auto &stack = getThreadLocalActiveStack();
if (stack.empty() || stack.front().controller.get() != this) {
throw py::raisePyError(PyExc_RuntimeError,
"Mismatched context manager __exit__");
throw std::runtime_error("Mismatched context manager __exit__");
}
stack.pop_front();
@@ -3,4 +3,5 @@
# Collapse all local python sources to the project level python/ directory.
################################################################################
npcomp_python_create_symlinks(${CMAKE_BINARY_DIR}/python ${CMAKE_CURRENT_SOURCE_DIR})
npcomp_python_create_symlinks(
${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_torch ${CMAKE_CURRENT_SOURCE_DIR})
@@ -12,7 +12,6 @@ print(f"PYTHONPATH={sys.path}")
import mlir
import npcomp
import _npcomp
import _torch_mlir
import torch_mlir
print("Extensions all loaded")
@@ -61,8 +61,8 @@ config.npcomp_tools_dir = os.path.join(config.npcomp_obj_root, 'bin')
npcomp_python_dir = "python" if config.npcomp_built_standalone else "tools/npcomp/python"
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
llvm_config.with_environment('PYTHONPATH', [
os.path.join(config.llvm_obj_root, "python"),
os.path.join(config.npcomp_obj_root, npcomp_python_dir)],
os.path.join(config.npcomp_python_packages_dir, 'npcomp_core'),
os.path.join(config.npcomp_python_packages_dir, 'npcomp_torch')],
append_path=True)
@@ -10,13 +10,15 @@ config.host_triple = "@LLVM_HOST_TRIPLE@"
config.target_triple = "@TARGET_TRIPLE@"
config.llvm_src_root = "@LLVM_SOURCE_DIR@"
config.llvm_obj_root = "@LLVM_BINARY_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
# TODO: Fix tools dir to find FileCheck.
#config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
config.llvm_tools_dir = "@LLVM_BINARY_DIR@/bin"
config.llvm_lib_dir = "@LLVM_LIBRARY_DIR@"
config.llvm_shlib_dir = "@SHLIBDIR@"
config.llvm_shlib_ext = "@SHLIBEXT@"
config.llvm_exe_ext = "@EXEEXT@"
config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
config.python_executable = "@PYTHON_EXECUTABLE@"
config.python_executable = "@Python3_EXECUTABLE@"
config.gold_executable = "@GOLD_EXECUTABLE@"
config.ld64_executable = "@LD64_EXECUTABLE@"
config.enable_shared = @ENABLE_SHARED@

@@ -35,6 +37,7 @@ config.host_arch = "@HOST_ARCH@"
config.npcomp_src_root = "@CMAKE_SOURCE_DIR@"
config.npcomp_obj_root = "@CMAKE_BINARY_DIR@"
config.npcomp_built_standalone = bool("@NPCOMP_BUILT_STANDALONE@")
config.npcomp_python_packages_dir = "@MLIR_NPCOMP_PYTHON_PACKAGES_DIR@"
# Support substitution of the tools_dir with user parameters. This is
# used when we can't determine the tool dir at configuration time.
@@ -11,6 +11,7 @@
#define NPCOMP_C_BASICPYTYPES_H
#include "mlir-c/IR.h"
#include "mlir-c/Support.h"
#ifdef __cplusplus
extern "C" {

@@ -21,58 +22,57 @@ extern "C" {
//===----------------------------------------------------------------------===//
/// Checks whether the given type is the Python "bool" type.
bool npcompTypeIsABasicpyBool(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyBool(MlirType t);
/// Gets the Python "bool" type.
MlirType npcompBasicpyBoolTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompBasicpyBoolTypeGet(MlirContext context);
//===----------------------------------------------------------------------===//
// !basicpy.BytesType
//===----------------------------------------------------------------------===//
/// Checks whether the given type is the Python "bytes" type.
bool npcompTypeIsABasicpyBytes(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyBytes(MlirType t);
/// Gets the Python "bytes" type.
MlirType npcompBasicpyBytesTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompBasicpyBytesTypeGet(MlirContext context);
//===----------------------------------------------------------------------===//
// !basicpy.DictType
//===----------------------------------------------------------------------===//
/// Checks whether the given type is the Python "dict" type.
bool npcompTypeIsABasicpyDict(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyDict(MlirType t);
/// Gets the generic Python "dict" type.
MlirType npcompBasicpyDictTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompBasicpyDictTypeGet(MlirContext context);
//===----------------------------------------------------------------------===//
// List type
//===----------------------------------------------------------------------===//
/// Checks whether the given type is the Python "list" type.
bool npcompTypeIsABasicpyList(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyList(MlirType t);
/// Gets the generic Python "list" type.
MlirType npcompBasicpyListTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompBasicpyListTypeGet(MlirContext context);
//===----------------------------------------------------------------------===//
// !basicpy.NoneType type.
//===----------------------------------------------------------------------===//
/// Checks whether the given type is a `!basicpy.NoneType`.
bool npcompTypeIsABasicpyNone(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyNone(MlirType t);
/// Gets the `!basicpy.NoneType` type.
MlirType npcompBasicpyNoneTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompBasicpyNoneTypeGet(MlirContext context);
//===----------------------------------------------------------------------===//
// SlotObject type.
//===----------------------------------------------------------------------===//
MlirType npcompBasicPySlotObjectTypeGet(MlirContext context,
MlirStringRef className,
intptr_t slotTypeCount,
MLIR_CAPI_EXPORTED MlirType npcompBasicPySlotObjectTypeGet(
MlirContext context, MlirStringRef className, intptr_t slotTypeCount,
const MlirType *slotTypes);
//===----------------------------------------------------------------------===//

@@ -80,10 +80,10 @@ MlirType npcompBasicPySlotObjectTypeGet(MlirContext context,
//===----------------------------------------------------------------------===//
/// Checks whether the given type is a `!basicpy.TupleType`.
bool npcompTypeIsABasicpyTuple(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsABasicpyTuple(MlirType t);
/// Gets the generic Python "tuple" type.
MlirType npcompBasicpyTupleTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompBasicpyTupleTypeGet(MlirContext context);
#ifdef __cplusplus
}
@@ -10,13 +10,15 @@
#ifndef NPCOMP_C_INITLLVM_H
#define NPCOMP_C_INITLLVM_H
#include "mlir-c/Support.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Initializes LLVM codegen infrastructure and related MLIR bridge components.
*/
void npcompInitializeLLVMCodegen();
MLIR_CAPI_EXPORTED void npcompInitializeLLVMCodegen();
#ifdef __cplusplus
}
@@ -11,6 +11,7 @@
#define NPCOMP_C_NUMPYTYPES_H
#include "mlir-c/IR.h"
#include "mlir-c/Support.h"
#ifdef __cplusplus
extern "C" {

@@ -22,31 +23,34 @@ extern "C" {
/// Checks whether the given type is the special "any dtype" type that is used
// to signal an NDArray or tensor of unknown type.
bool npcompTypeIsANumpyAnyDtype(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsANumpyAnyDtype(MlirType t);
/// Gets the "any dtype" type.
MlirType npcompAnyDtypeTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompAnyDtypeTypeGet(MlirContext context);
//===----------------------------------------------------------------------===//
// NDArray type.
//===----------------------------------------------------------------------===//
/// Checks whether the given type is an NdArray type.
bool npcompTypeIsANumpyNdArray(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsANumpyNdArray(MlirType t);
/// Gets a numpy.NdArray type that is unranked.
MlirType npcompNumpyNdArrayTypeGetUnranked(MlirType elementType);
MLIR_CAPI_EXPORTED MlirType
npcompNumpyNdArrayTypeGetUnranked(MlirType elementType);
/// Gets a numpy.NdArray type that is ranked. Any dimensions that are -1 are
/// unknown.
MlirType npcompNumpyNdArrayTypeGetRanked(intptr_t rank, const int64_t *shape,
MlirType elementType);
MLIR_CAPI_EXPORTED MlirType npcompNumpyNdArrayTypeGetRanked(
intptr_t rank, const int64_t *shape, MlirType elementType);
/// Helper that gets an equivalent NdArrayType from a ShapedType.
MlirType npcompNumpyNdArrayTypeGetFromShaped(MlirType shapedType);
MLIR_CAPI_EXPORTED MlirType
npcompNumpyNdArrayTypeGetFromShaped(MlirType shapedType);
/// Helper that converts an NdArrayType to a TensorType.
MlirType npcompNumpyNdArrayTypeToTensor(MlirType ndarrayType);
MLIR_CAPI_EXPORTED MlirType
npcompNumpyNdArrayTypeToTensor(MlirType ndarrayType);
#ifdef __cplusplus
}
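To make the NdArray C API above more concrete, here is a minimal, hypothetical C sketch (not part of the commit) of how these getters and the NdArray/tensor conversion helpers might be driven. It assumes the standard MLIR C API helpers (mlirContextCreate, mlirF32TypeGet) and that npcompRegisterAllDialects has made the npcomp dialects available in the context.

#include <stdint.h>
#include <stdio.h>
#include "mlir-c/IR.h"
#include "mlir-c/BuiltinTypes.h"
#include "npcomp-c/NumpyTypes.h"
#include "npcomp-c/Registration.h"

int main(void) {
  MlirContext ctx = mlirContextCreate();
  npcompRegisterAllDialects(ctx);

  // Ranked NdArray with one unknown (-1) dimension and an f32 element type.
  int64_t shape[2] = {4, -1};
  MlirType ndarray = npcompNumpyNdArrayTypeGetRanked(2, shape, mlirF32TypeGet(ctx));

  // Round-trip through the equivalent tensor form and back to NdArray.
  MlirType tensor = npcompNumpyNdArrayTypeToTensor(ndarray);
  MlirType back = npcompNumpyNdArrayTypeGetFromShaped(tensor);

  printf("any-dtype? %d, ndarray? %d\n",
         npcompTypeIsANumpyAnyDtype(npcompAnyDtypeTypeGet(ctx)),
         npcompTypeIsANumpyNdArray(back));

  mlirContextDestroy(ctx);
  return 0;
}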
@@ -0,0 +1,99 @@
//===-- npcomp-c/RefJITBackend.h - C API for the reference JIT ----*- C -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM
// Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef NPCOMP_C_REFJITBACKEND_H
#define NPCOMP_C_REFJITBACKEND_H

#include <stdbool.h>

#include "mlir-c/Pass.h"
#include "mlir-c/Support.h"

#ifdef __cplusplus
extern "C" {
#endif

// Define opaque API structs.
#define DEFINE_C_API_STRUCT(name, storage) \
struct name { \
storage *ptr; \
}; \
typedef struct name name

DEFINE_C_API_STRUCT(NpcompRefJitModule, void);
DEFINE_C_API_STRUCT(NpcompRefJitValueList, void);

#undef DEFINE_C_API_STRUCT

// Must be kept in sync with C++ side.
enum NpcompRefJitElementType {
NPCOMP_REFJIT_NONE = 0,
NPCOMP_REFJIT_F32 = 1,
};

/// Populates a PassManager with a pipeline that performs backend compilation.
/// The resulting module can be passed to npcompRefJitModuleCreate().
MLIR_CAPI_EXPORTED void
npcompRefJitBuildBackendCompilationPipeline(MlirPassManager passManager,
bool optimize);

/// Creates a RefJit module from an MlirModule (as compiled from the above
/// pipeline). On success, returns a !null NpcompRefJitModule. On failure,
/// returns null and malloc() allocates an error message into *errorMessage.
/// The caller must free these messages.
MLIR_CAPI_EXPORTED NpcompRefJitModule
npcompRefJitModuleCreate(MlirModule module, MlirStringRef *sharedLibs,
intptr_t sharedLibsSize, char **errorMessage);

/// Whether the module is null.
static inline bool npcompRefJitModuleIsNull(NpcompRefJitModule m) {
return !m.ptr;
}

/// Destroys a refjit module.
MLIR_CAPI_EXPORTED void npcompRefJitModuleDestroy(NpcompRefJitModule module);

/// Invokes a function on a RefJit module. On success, returns true and malloc()
/// and adds all outputs to the passed outputs list. On failure, returns false
/// and populates *errorMessage with a malloc() allocated error message, which
/// must be caller freed.
MLIR_CAPI_EXPORTED bool
npcompRefJitModuleInvoke(NpcompRefJitModule m, MlirStringRef functionName,
NpcompRefJitValueList inputOutputs,
char **errorMessage);

/// Creates an empty value list.
MLIR_CAPI_EXPORTED NpcompRefJitValueList npcompRefJitValueListCreate();

/// Destroys a value list.
MLIR_CAPI_EXPORTED void
npcompRefJitValueListDestroy(NpcompRefJitValueList list);

/// Returns the size of the value list.
MLIR_CAPI_EXPORTED intptr_t
npcompRefJitValueListSize(NpcompRefJitValueList list);

/// Adds values to the list.
MLIR_CAPI_EXPORTED void npcompRefJitValueAddTensorCopy(
NpcompRefJitValueList list, NpcompRefJitElementType elementType,
const int32_t *extents, intptr_t extentsSize, const void *data);

// Reads Tensor from a list.
MLIR_CAPI_EXPORTED bool npcompRefJitValueIsaTensor(NpcompRefJitValueList list,
intptr_t i);
MLIR_CAPI_EXPORTED void *
npcompRefJitValueGetTensor(NpcompRefJitValueList list, intptr_t i,
NpcompRefJitElementType *elementType, intptr_t *rank,
const int32_t **extents);

#ifdef __cplusplus
}
#endif

#endif // NPCOMP_C_REFJITBACKEND_H
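The new header above defines the whole RefJit lifecycle: build the backend pipeline, create a JIT module, push inputs into a value list, invoke, and read the outputs back out of the same list. The following C sketch (hypothetical, not part of the commit) strings those calls together. It assumes `module` already contains IR at the level the backend compilation pipeline expects, that the invoked function is named "forward" (an arbitrary example name), and that passing NULL/0 for the shared-library list is acceptable.

#include <stdio.h>
#include <stdlib.h>
#include "mlir-c/IR.h"
#include "mlir-c/Pass.h"
#include "npcomp-c/RefJITBackend.h"

static void runOnRefJit(MlirContext ctx, MlirModule module) {
  // Lower the module for the reference JIT.
  MlirPassManager pm = mlirPassManagerCreate(ctx);
  npcompRefJitBuildBackendCompilationPipeline(pm, /*optimize=*/true);
  if (mlirLogicalResultIsFailure(mlirPassManagerRun(pm, module))) {
    mlirPassManagerDestroy(pm);
    return;
  }
  mlirPassManagerDestroy(pm);

  // JIT-compile it (assumption: no extra shared libraries needed).
  char *errorMessage = NULL;
  NpcompRefJitModule jitModule =
      npcompRefJitModuleCreate(module, /*sharedLibs=*/NULL, 0, &errorMessage);
  if (npcompRefJitModuleIsNull(jitModule)) {
    fprintf(stderr, "refjit create failed: %s\n", errorMessage);
    free(errorMessage);
    return;
  }

  // One 2x2 f32 input; on success the same list is refilled with the outputs.
  float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  int32_t extents[2] = {2, 2};
  NpcompRefJitValueList values = npcompRefJitValueListCreate();
  npcompRefJitValueAddTensorCopy(values, NPCOMP_REFJIT_F32, extents, 2, data);
  if (npcompRefJitModuleInvoke(jitModule,
                               mlirStringRefCreateFromCString("forward"),
                               values, &errorMessage)) {
    for (intptr_t i = 0; i < npcompRefJitValueListSize(values); ++i) {
      if (npcompRefJitValueIsaTensor(values, i)) {
        enum NpcompRefJitElementType elementType;
        intptr_t rank;
        const int32_t *outExtents;
        void *outData = npcompRefJitValueGetTensor(values, i, &elementType,
                                                   &rank, &outExtents);
        printf("output %ld: rank %ld, data %p\n", (long)i, (long)rank, outData);
      }
    }
  } else {
    fprintf(stderr, "invoke failed: %s\n", errorMessage);
    free(errorMessage);
  }
  npcompRefJitValueListDestroy(values);
  npcompRefJitModuleDestroy(jitModule);
}

This mirrors the contract spelled out in the header comments: error strings are malloc()ed and must be freed by the caller, and the value list is reused for outputs after a successful invoke.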
@@ -11,6 +11,7 @@
#define NPCOMP_C_REGISTRATION_H
#include "mlir-c/IR.h"
#include "mlir-c/Support.h"
#ifdef __cplusplus
extern "C" {

@@ -19,10 +20,10 @@ extern "C" {
/** Registers all NPComp dialects with a context.
 * This is needed before creating IR for these Dialects.
 */
void npcompRegisterAllDialects(MlirContext context);
MLIR_CAPI_EXPORTED void npcompRegisterAllDialects(MlirContext context);
/** Registers all NPComp passes for symbolic access with the global registry. */
void npcompRegisterAllPasses();
MLIR_CAPI_EXPORTED void npcompRegisterAllPasses();
#ifdef __cplusplus
}
@ -11,6 +11,7 @@
|
|||
#define NPCOMP_C_TORCHTYPES_H
|
||||
|
||||
#include "mlir-c/IR.h"
|
||||
#include "mlir-c/Support.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -21,10 +22,10 @@ extern "C" {
|
|||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a torch.nn.Module type
|
||||
bool npcompTypeIsATorchNnModule(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchNnModule(MlirType t);
|
||||
|
||||
/// Gets the !torch.nn.Module type of the specified class.
|
||||
MlirType npcompTorchNnModuleTypeGet(MlirContext context,
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchNnModuleTypeGet(MlirContext context,
|
||||
MlirStringRef className);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
@ -32,21 +33,21 @@ MlirType npcompTorchNnModuleTypeGet(MlirContext context,
|
|||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.optional<T> type
|
||||
bool npcompTypeIsATorchOptional(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchOptional(MlirType t);
|
||||
|
||||
/// Gets the !torch.optional<T> type with subtype T.
|
||||
MlirType npcompTorchOptionalTypeGet(MlirType containedType);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchOptionalTypeGet(MlirType containedType);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.tuple<T1, T2, T3> type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.tuple type
|
||||
bool npcompTypeIsATorchTuple(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchTuple(MlirType t);
|
||||
|
||||
/// Gets the !torch.tuple type with contained types `containedTypes`.
|
||||
MlirType npcompTorchTupleTypeGet(MlirContext context,
|
||||
intptr_t numContainedTypes,
|
||||
MLIR_CAPI_EXPORTED MlirType
|
||||
npcompTorchTupleTypeGet(MlirContext context, intptr_t numContainedTypes,
|
||||
MlirType const *containedTypes);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
@ -54,77 +55,77 @@ MlirType npcompTorchTupleTypeGet(MlirContext context,
|
|||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.list<T> type
|
||||
bool npcompTypeIsATorchList(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchList(MlirType t);
|
||||
|
||||
/// Gets the !torch.list<T> type with contained T.
|
||||
MlirType npcompTorchListTypeGet(MlirType containedType);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchListTypeGet(MlirType containedType);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.Device type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.Device type
|
||||
bool npcompTypeIsATorchDevice(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchDevice(MlirType t);
|
||||
|
||||
/// Gets the !torch.Device type.
|
||||
MlirType npcompTorchDeviceTypeGet(MlirContext context);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchDeviceTypeGet(MlirContext context);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.bool type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.bool type
|
||||
bool npcompTypeIsATorchBool(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchBool(MlirType t);
|
||||
|
||||
/// Gets the !torch.bool type.
|
||||
MlirType npcompTorchBoolTypeGet(MlirContext context);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchBoolTypeGet(MlirContext context);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.int type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.int type
|
||||
bool npcompTypeIsATorchInt(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchInt(MlirType t);
|
||||
|
||||
/// Gets the !torch.int type.
|
||||
MlirType npcompTorchIntTypeGet(MlirContext context);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchIntTypeGet(MlirContext context);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.float type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.float type
|
||||
bool npcompTypeIsATorchFloat(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchFloat(MlirType t);
|
||||
|
||||
/// Gets the !torch.float type.
|
||||
MlirType npcompTorchFloatTypeGet(MlirContext context);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchFloatTypeGet(MlirContext context);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.LinearParams type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.LinearParams type
|
||||
bool npcompTypeIsATorchLinearParams(MlirType t);
|
||||
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchLinearParams(MlirType t);
|
||||
|
||||
/// Gets the !torch.LinearParams type.
|
||||
MlirType npcompTorchLinearParamsTypeGet(MlirContext context);
|
||||
MLIR_CAPI_EXPORTED MlirType npcompTorchLinearParamsTypeGet(MlirContext context);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// torch.qint8 type.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
/// Checks whether the given type is a !torch.qint8 type
|
||||
bool npcompTypeIsATorchQInt8(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchQInt8(MlirType t);

/// Gets the !torch.qint8 type.
MlirType npcompTorchQInt8TypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompTorchQInt8TypeGet(MlirContext context);

//===----------------------------------------------------------------------===//
// torch.tensor type.
//===----------------------------------------------------------------------===//

/// Checks whether the given type is a !torch.tensor type
bool npcompTypeIsATorchNonValueTensor(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchNonValueTensor(MlirType t);

/// Gets a !torch.tensor type.
///
@ -132,24 +133,24 @@ bool npcompTypeIsATorchNonValueTensor(MlirType t);
/// information is present (and `numSizes` is ignored in that case). -
/// `optionalDtype` is allowed to be null, meaning that no dtype
/// information is present.
MlirType npcompTorchNonValueTensorTypeGet(MlirContext context,
                                          intptr_t numSizes,
                                          const int64_t *optionalSizes,
MLIR_CAPI_EXPORTED MlirType npcompTorchNonValueTensorTypeGet(
    MlirContext context, intptr_t numSizes, const int64_t *optionalSizes,
    MlirType optionalDtype);

/// Gets the !torch.tensor type with the least static information.
MlirType
MLIR_CAPI_EXPORTED MlirType
npcompTorchNonValueTensorTypeGetWithLeastStaticInformation(MlirContext context);

/// Gets a !torch.tensor type, taking shape/dtype from a ShapedType `type`.
MlirType npcompTorchNonValueTensorTypeGetFromShaped(MlirType type);
MLIR_CAPI_EXPORTED MlirType
npcompTorchNonValueTensorTypeGetFromShaped(MlirType type);

//===----------------------------------------------------------------------===//
// torch.vtensor type.
//===----------------------------------------------------------------------===//

/// Checks whether the given type is a !torch.vtensor type
bool npcompTypeIsATorchValueTensor(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchValueTensor(MlirType t);

/// Gets a !torch.vtensor type.
///
@ -157,36 +158,37 @@ bool npcompTypeIsATorchValueTensor(MlirType t);
/// information is present (and `numSizes` is ignored in that case).
/// - `optionalDtype` is allowed to be null, meaning that no dtype
/// information is present.
MlirType npcompTorchValueTensorTypeGet(MlirContext context, intptr_t numSizes,
                                       const int64_t *optionalSizes,
MLIR_CAPI_EXPORTED MlirType npcompTorchValueTensorTypeGet(
    MlirContext context, intptr_t numSizes, const int64_t *optionalSizes,
    MlirType optionalDtype);

/// Gets the !torch.tensor type with the least static information.
MlirType
MLIR_CAPI_EXPORTED MlirType
npcompTorchValueTensorTypeGetWithLeastStaticInformation(MlirContext context);

/// Gets a !torch.tensor type, taking shape/dtype from a ShapedType `type`.
MlirType npcompTorchValueTensorTypeGetFromShaped(MlirType type);
MLIR_CAPI_EXPORTED MlirType
npcompTorchValueTensorTypeGetFromShaped(MlirType type);

//===----------------------------------------------------------------------===//
// !torch.none type.
//===----------------------------------------------------------------------===//

/// Checks whether the given type is a !torch.none type
bool npcompTypeIsATorchNone(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchNone(MlirType t);

/// Gets the !torch.none type.
MlirType npcompTorchNoneTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompTorchNoneTypeGet(MlirContext context);

//===----------------------------------------------------------------------===//
// !torch.str type.
//===----------------------------------------------------------------------===//

/// Checks whether the given type is a !torch.str type
bool npcompTypeIsATorchString(MlirType t);
MLIR_CAPI_EXPORTED bool npcompTypeIsATorchString(MlirType t);

/// Gets the !torch.str type.
MlirType npcompTorchStringTypeGet(MlirContext context);
MLIR_CAPI_EXPORTED MlirType npcompTorchStringTypeGet(MlirContext context);

#ifdef __cplusplus
}
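
Note: the MLIR_CAPI_EXPORTED entry points above are what the statically assembled Python package in this commit ultimately builds on. A minimal, hedged sketch of touching one of these types from Python (it assumes the rebuilt package is on PYTHONPATH and that the torch dialect is registered on the Context by the npcomp extension; that registration hook is not part of this diff):

    # Hedged sketch, not part of this commit.
    from mlir import ir
    import npcomp  # loads the _npcomp extension as a side effect

    with ir.Context():
        t = ir.Type.parse("!torch.vtensor<[2,3],f32>")  # assumes the dialect is registered
        print(t)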
@ -1,9 +1,5 @@
|
|||
add_subdirectory(Common)
|
||||
|
||||
if(NPCOMP_ENABLE_REFJIT)
|
||||
add_subdirectory(RefJIT)
|
||||
endif()
|
||||
|
||||
# Currently this doesn't introduce any actual dependency on IREE, so add it
|
||||
# unconditionally.
|
||||
# TODO: Put this behind the NPCOMP_ENABLE_IREE flag.
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
################################################################################
|
||||
# NPCOMPBackendRefJITPythonModule
|
||||
################################################################################
|
||||
|
||||
include(NpcompPython)
|
||||
|
||||
set(PYBIND_SOURCES
|
||||
PythonModule.cpp
|
||||
)
|
||||
add_library(NPCOMPBackendRefJITPythonModule
|
||||
${PYBIND_SOURCES}
|
||||
)
|
||||
|
||||
target_link_libraries(NPCOMPBackendRefJITPythonModule
|
||||
pybind11::module
|
||||
MLIRExecutionEngine
|
||||
MLIRTargetLLVMIRExport
|
||||
|
||||
NPCOMPRefBackendJITHelpers
|
||||
)
|
||||
|
||||
npcomp_python_target_compile_options(NPCOMPBackendRefJITPythonModule)
|
||||
|
||||
add_dependencies(NPCOMPBackendRefJITPythonModule
|
||||
NPCOMPPythonResources
|
||||
)
|
|
@ -1,131 +0,0 @@
|
|||
//===- PythonModule.cpp - RefJIT python bindings --------------------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "npcomp/Backend/RefJIT/PythonModule.h"
|
||||
|
||||
#include "pybind11/numpy.h"
|
||||
|
||||
#include "mlir/CAPI/IR.h"
|
||||
#include "mlir/CAPI/Pass.h"
|
||||
#include "npcomp/RefBackend/JITHelpers/JITModule.h"
|
||||
|
||||
using llvm::SmallVector;
|
||||
using llvm::StringRef;
|
||||
using llvm::Twine;
|
||||
|
||||
// Make namespaces consistent.
|
||||
using refback::JITModule;
|
||||
using refbackrt::Ref;
|
||||
using refbackrt::Tensor;
|
||||
using refbackrt::RtValue;
|
||||
|
||||
template <typename T>
|
||||
static T checkError(llvm::Expected<T> &&expected, Twine banner = {}) {
|
||||
if (LLVM_LIKELY(expected))
|
||||
return std::move(*expected);
|
||||
|
||||
std::string errorMessage;
|
||||
llvm::raw_string_ostream os(errorMessage);
|
||||
llvm::logAllUnhandledErrors(expected.takeError(), os, banner);
|
||||
os.flush();
|
||||
throw py::raisePyError(PyExc_RuntimeError, errorMessage.c_str());
|
||||
}
|
||||
|
||||
static refbackrt::ElementType
|
||||
mapBufferFormatToElementType(const std::string &format, py::ssize_t itemSize) {
|
||||
if (format == "f")
|
||||
return refbackrt::ElementType::F32;
|
||||
|
||||
std::string message("unsupported buffer format: ");
|
||||
message.append(format);
|
||||
throw py::raiseValueError(message);
|
||||
}
|
||||
|
||||
static Ref<Tensor> copyBufferToTensor(py::buffer buffer) {
|
||||
// Request a C contiguous view as that is what Tensor accepts now (no strides
|
||||
// or non row-major layout).
|
||||
int flags = PyBUF_C_CONTIGUOUS | PyBUF_FORMAT;
|
||||
std::unique_ptr<Py_buffer> view(new Py_buffer());
|
||||
if (PyObject_GetBuffer(buffer.ptr(), view.get(), flags) != 0) {
|
||||
throw py::error_already_set();
|
||||
}
|
||||
py::buffer_info info(view.release());
|
||||
auto elementType = mapBufferFormatToElementType(info.format, info.itemsize);
|
||||
|
||||
// TODO: Switch Tensor extents to ssize_t for efficiency.
|
||||
SmallVector<std::int32_t, 4> extents(info.shape.begin(), info.shape.end());
|
||||
return Tensor::create(
|
||||
refbackrt::ArrayRef<std::int32_t>(extents.data(), extents.size()),
|
||||
elementType, info.ptr);
|
||||
}
|
||||
|
||||
py::array wrapTensorAsArray(Ref<Tensor> tensor) {
|
||||
auto pyTensor = py::cast(tensor);
|
||||
auto extents = tensor->getExtents();
|
||||
// TODO: Switch Tensor extents to ssize_t for efficiency.
|
||||
std::vector<ssize_t> shape(extents.data(), extents.data() + extents.size());
|
||||
|
||||
const char *format;
|
||||
switch (tensor->getElementType()) {
|
||||
case refbackrt::ElementType::F32:
|
||||
format = "f";
|
||||
break;
|
||||
default:
|
||||
throw py::raiseValueError("unsupported tensor element type");
|
||||
}
|
||||
|
||||
return py::array(py::dtype(format), shape, tensor->getData(),
|
||||
/*base=*/std::move(pyTensor));
|
||||
}
|
||||
|
||||
void npcomp::python::defineBackendRefJitModule(py::module &m) {
|
||||
m.def("build_backend_compilation_pipeline", [](MlirPassManager capiPm) {
|
||||
mlir::PassManager *pm = unwrap(capiPm);
|
||||
JITModule::buildBackendCompilationPipeline(*pm);
|
||||
});
|
||||
py::class_<JITModule>(m, "JITModule")
|
||||
.def_static(
|
||||
"from_compiled_module",
|
||||
[](MlirModule capiModule, std::vector<std::string> pySharedLibs)
|
||||
-> std::unique_ptr<JITModule> {
|
||||
SmallVector<StringRef, 4> sharedLibs(pySharedLibs.begin(),
|
||||
pySharedLibs.end());
|
||||
auto module = unwrap(capiModule);
|
||||
auto jitModule =
|
||||
checkError(JITModule::fromCompiledModule(module, sharedLibs),
|
||||
"error creating JITModule: ");
|
||||
return jitModule;
|
||||
},
|
||||
py::arg("module"), py::arg("shared_libs"))
|
||||
.def(
|
||||
"invoke",
|
||||
[](JITModule &self, std::string functionName,
|
||||
std::vector<py::buffer> inputs) {
|
||||
// Prepare inputs.
|
||||
llvm::SmallVector<RtValue, 4> inputValues;
|
||||
inputValues.reserve(inputs.size());
|
||||
for (py::buffer &inputBuffer : inputs) {
|
||||
inputValues.push_back(copyBufferToTensor(inputBuffer));
|
||||
}
|
||||
|
||||
auto outputs = checkError(self.invoke(functionName, inputValues),
|
||||
"error invoking JIT function: ");
|
||||
std::vector<py::array> outputArrays;
|
||||
outputArrays.reserve(outputs.size());
|
||||
for (RtValue &outputTensor : outputs) {
|
||||
outputArrays.push_back(wrapTensorAsArray(outputTensor.toTensor()));
|
||||
}
|
||||
return outputArrays;
|
||||
},
|
||||
py::arg("function_name"), py::arg("inputs"));
|
||||
|
||||
// A Ref<Tensor> needs to be bound because we use it as a base for the
|
||||
// ndarray (the array retains a reference to it). Users should not encounter
|
||||
// this unless if they go mucking through the array internals.
|
||||
py::class_<Ref<Tensor>>(m, "TensorRef");
|
||||
}
|
|
@ -6,6 +6,7 @@ set(LLVM_LINK_COMPONENTS
|
|||
|
||||
add_npcomp_library(NPCOMPCAPI
|
||||
InitLLVM.cpp
|
||||
RefJITBackend.cpp
|
||||
Registration.cpp
|
||||
BasicpyTypes.cpp
|
||||
NumpyTypes.cpp
|
||||
|
@ -18,5 +19,11 @@ add_npcomp_library(NPCOMPCAPI
|
|||
NPCOMPInitAll
|
||||
NPCOMPBasicpyDialect
|
||||
NPCOMPNumpyDialect
|
||||
NPCOMPRefBackendJITHelpers
|
||||
NPCOMPRuntime
|
||||
NPCOMPTorchDialect
|
||||
|
||||
# MLIR CAPI deps
|
||||
MLIRCAPIIR
|
||||
MLIRCAPIRegistration # TODO: Remove
|
||||
)
|
||||
|
|
|
@ -0,0 +1,127 @@
|
|||
//===- RefJITBackend.cpp - CAPI for RefJit --------------------------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "npcomp-c/RefJITBackend.h"
|
||||
|
||||
#include "mlir/CAPI/IR.h"
|
||||
#include "mlir/CAPI/Pass.h"
|
||||
#include "mlir/CAPI/Wrap.h"
|
||||
#include "mlir/Pass/PassManager.h"
|
||||
#include "npcomp/RefBackend/JITHelpers/JITModule.h"
|
||||
#include "llvm/ADT/Optional.h"
|
||||
|
||||
using namespace llvm;
|
||||
using namespace mlir;
|
||||
using namespace refback;
|
||||
using namespace refbackrt;
|
||||
|
||||
using ValueListCpp = SmallVector<RtValue, 4>;
|
||||
DEFINE_C_API_PTR_METHODS(NpcompRefJitModule, JITModule)
|
||||
DEFINE_C_API_PTR_METHODS(NpcompRefJitValueList, ValueListCpp)
|
||||
|
||||
static_assert(static_cast<int>(ElementType::F32) == NPCOMP_REFJIT_F32,
|
||||
"mismatched F32 mapping");
|
||||
|
||||
namespace {
|
||||
template <typename T>
|
||||
static Optional<T> checkError(llvm::Expected<T> &&expected,
|
||||
char **errorMessageCstr, Twine banner = {}) {
|
||||
if (LLVM_LIKELY(expected))
|
||||
return std::move(*expected);
|
||||
|
||||
std::string errorMessage;
|
||||
llvm::raw_string_ostream os(errorMessage);
|
||||
llvm::logAllUnhandledErrors(expected.takeError(), os, banner);
|
||||
os.flush();
|
||||
*errorMessageCstr = strdup(errorMessage.c_str());
|
||||
return llvm::None;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void npcompRefJitBuildBackendCompilationPipeline(MlirPassManager passManager,
|
||||
bool optimize) {
|
||||
JITModule::buildBackendCompilationPipeline(*unwrap(passManager), optimize);
|
||||
}
|
||||
|
||||
NpcompRefJitModule npcompRefJitModuleCreate(MlirModule moduleOp,
|
||||
MlirStringRef *sharedLibs,
|
||||
intptr_t sharedLibsSize,
|
||||
char **errorMessage) {
|
||||
SmallVector<llvm::StringRef> sharedLibsCpp;
|
||||
for (intptr_t i = 0; i < sharedLibsSize; ++i) {
|
||||
sharedLibsCpp.push_back(
|
||||
llvm::StringRef(sharedLibs[i].data, sharedLibs[i].length));
|
||||
}
|
||||
|
||||
auto refJitModuleCpp =
|
||||
checkError(JITModule::fromCompiledModule(unwrap(moduleOp), sharedLibsCpp),
|
||||
errorMessage, "error creating refjit module");
|
||||
if (!refJitModuleCpp)
|
||||
return {nullptr};
|
||||
return wrap(refJitModuleCpp->release());
|
||||
}
|
||||
|
||||
void npcompRefJitModuleDestroy(NpcompRefJitModule module) {
|
||||
delete unwrap(module);
|
||||
}
|
||||
|
||||
bool npcompRefJitModuleInvoke(NpcompRefJitModule m, MlirStringRef functionName,
|
||||
NpcompRefJitValueList inputOutputs,
|
||||
char **errorMessage) {
|
||||
ValueListCpp *ioList = unwrap(inputOutputs);
|
||||
auto results = checkError(
|
||||
unwrap(m)->invoke(llvm::StringRef(functionName.data, functionName.length),
|
||||
*ioList),
|
||||
errorMessage, "error invoking function");
|
||||
ioList->clear();
|
||||
if (!results)
|
||||
return false;
|
||||
|
||||
for (int i = 0, e = results->size(); i < e; ++i) {
|
||||
ioList->push_back(std::move((*results)[i]));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
NpcompRefJitValueList npcompRefJitValueListCreate() {
|
||||
return wrap(new ValueListCpp());
|
||||
}
|
||||
|
||||
void npcompRefJitValueListDestroy(NpcompRefJitValueList list) {
|
||||
delete unwrap(list);
|
||||
}
|
||||
|
||||
intptr_t npcompRefJitValueListSize(NpcompRefJitValueList list) {
|
||||
return unwrap(list)->size();
|
||||
}
|
||||
|
||||
void npcompRefJitValueAddTensorCopy(NpcompRefJitValueList list,
|
||||
NpcompRefJitElementType elementType,
|
||||
const int32_t *extents,
|
||||
intptr_t extentsSize, const void *data) {
|
||||
ElementType elementTypeCpp = static_cast<ElementType>(elementType);
|
||||
auto tensor =
|
||||
Tensor::create(refbackrt::ArrayRef<std::int32_t>(extents, extentsSize),
|
||||
elementTypeCpp, const_cast<void *>(data));
|
||||
unwrap(list)->push_back(std::move(tensor));
|
||||
}
|
||||
|
||||
bool npcompRefJitValueIsaTensor(NpcompRefJitValueList list, intptr_t i) {
|
||||
return (*unwrap(list))[i].isTensor();
|
||||
}
|
||||
|
||||
void *npcompRefJitValueGetTensor(NpcompRefJitValueList list, intptr_t i,
|
||||
NpcompRefJitElementType *elementType,
|
||||
intptr_t *rank, const int32_t **extents) {
|
||||
auto tensor = (*unwrap(list))[i].toTensor();
|
||||
*elementType = static_cast<NpcompRefJitElementType>(tensor->getElementType());
|
||||
*rank = tensor->getRank();
|
||||
*extents = tensor->getExtents().data();
|
||||
return tensor->getData();
|
||||
}
|
|
@ -43,8 +43,10 @@ add_npcomp_library(NPCOMPInitAll
|
|||
NPCOMPTCFPasses
|
||||
NPCOMPTypingPasses
|
||||
|
||||
${npcomp_dialect_libs}
|
||||
# TODO: We shouldn't need npcomp_conversion_libs here, but we have
|
||||
# some dialect transform libraries accumulating into that property.
|
||||
${npcomp_conversion_libs}
|
||||
${npcomp_dialect_libs}
|
||||
${mlir_dialect_libs}
|
||||
${mlir_conversion_libs}
|
||||
)
|
||||
|
|
|
@ -7,6 +7,8 @@ add_subdirectory(TCFToLinalg)
|
|||
add_subdirectory(TCFToStd)
|
||||
add_subdirectory(TCFToTCP)
|
||||
|
||||
get_property(npcomp_conversion_libs GLOBAL PROPERTY NPCOMP_CONVERSION_LIBS)
|
||||
|
||||
add_npcomp_library(NPCOMPConversionPasses
|
||||
Passes.cpp
|
||||
|
||||
|
@ -17,7 +19,5 @@ add_npcomp_library(NPCOMPConversionPasses
|
|||
Core
|
||||
|
||||
LINK_LIBS PUBLIC
|
||||
NPCOMPBasicpyToSTD
|
||||
NPCOMPNumpyToTCF
|
||||
NPCOMPTCFToTCP
|
||||
${npcomp_conversion_libs}
|
||||
)
|
||||
|
|
|
@ -20,6 +20,7 @@ add_npcomp_library(NPCOMPRefBackend
|
|||
MLIRLinalg
|
||||
MLIRLinalgToLLVM
|
||||
MLIRMathToLLVM
|
||||
MLIRMathTransforms
|
||||
MLIRMemRefToLLVM
|
||||
MLIRSCFToStandard
|
||||
MLIRSCFTransforms
|
||||
|
|
|
@ -20,7 +20,12 @@
|
|||
|
||||
using namespace refbackrt;
|
||||
|
||||
extern "C" void __npcomp_compiler_rt_abort_if(bool b, const char *msg) {
|
||||
extern "C" {
|
||||
__attribute__((visibility("default"))) void
|
||||
__npcomp_compiler_rt_abort_if(bool b, const char *msg);
|
||||
}
|
||||
|
||||
void __npcomp_compiler_rt_abort_if(bool b, const char *msg) {
|
||||
if (b) {
|
||||
std::fprintf(stderr, "NPCOMP: aborting: %s\n", msg);
|
||||
std::exit(1);
|
||||
|
|
|
@ -493,7 +493,10 @@ RtValue refbackrt::createRtValueFromOutputArgInfo(const OutputArgInfo &info) {
|
|||
return RtValue(Tensor::create(shape, ElementType::F32, data));
|
||||
break;
|
||||
}
|
||||
default: { assert(false && "unknown output tensor type"); }
|
||||
default: {
|
||||
assert(false && "unknown output tensor type");
|
||||
return RtValue();
|
||||
}
|
||||
}
|
||||
|
||||
// The Tensor::create function will malloc and memcpy the data
|
||||
|
|
|
@ -1,15 +1,19 @@
|
|||
include(AddMLIRPython)
|
||||
include(MLIRDetectPythonEnv)
|
||||
|
||||
################################################################################
|
||||
# Resources that must be packaged into the python tree
|
||||
################################################################################
|
||||
|
||||
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/npcomp/compiler/backend/refjit_resources")
|
||||
add_custom_target(NPCOMPPythonResources)
|
||||
add_custom_target(NPCOMPPythonResources ALL)
|
||||
add_custom_command(
|
||||
TARGET NPCOMPPythonResources
|
||||
COMMAND ${CMAKE_COMMAND} -E copy
|
||||
# TODO: Make the runtime library work for windows.
|
||||
# TODO: Use $<TARGET-FILE:> for this.
|
||||
${CMAKE_BINARY_DIR}/lib/libNPCOMPCompilerRuntimeShlib${CMAKE_SHARED_LIBRARY_SUFFIX}
|
||||
${CMAKE_CURRENT_BINARY_DIR}/npcomp/compiler/generic/backend/libNPCOMPCompilerRuntimeShlib${CMAKE_SHARED_LIBRARY_SUFFIX}
|
||||
${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_core/npcomp/compiler/generic/backend/libNPCOMPCompilerRuntimeShlib${CMAKE_SHARED_LIBRARY_SUFFIX}
|
||||
)
|
||||
add_dependencies(NPCOMPPythonResources
|
||||
NPCOMPCompilerRuntimeShlib
|
||||
|
@ -17,69 +21,123 @@ add_dependencies(NPCOMPPythonResources
|
|||
|
||||
|
||||
################################################################################
|
||||
# Manage python source files
|
||||
################################################################################
|
||||
npcomp_python_create_symlinks(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
|
||||
|
||||
################################################################################
|
||||
# Native extensions
|
||||
# Declare sources
|
||||
################################################################################
|
||||
|
||||
# Normally on unix-like platforms, extensions are built as "MODULE" libraries
|
||||
# and do not explicitly link to the python shared object. This allows for
|
||||
# come greater deployment flexibility since the extension will bind to
|
||||
# symbols in the python interpreter on load. However, it also keeps the
|
||||
# linker from erroring on undefined symbols, leaving this to (usually obtuse)
|
||||
# runtime errors. Building in "SHARED" mode with an explicit link to the
|
||||
# python libraries allows us to build with the expectation of no undefined
|
||||
# symbols, which is better for development.
|
||||
# TODO(laurenzo): Windows requires linking against the PYTHON_LIBRARIES
|
||||
# TODO(laurenzo): OSX requires allowing undefined (-undefined dynamic_lookup)
|
||||
if(NPCOMP_PYTHON_BINDINGS_VERSION_LOCKED)
|
||||
set(NPCOMP_PYEXT_LINK_MODE SHARED)
|
||||
set(NPCOMP_PYEXT_LIBADD ${Python3_LIBRARIES})
|
||||
else()
|
||||
set(NPCOMP_PYEXT_LINK_MODE MODULE)
|
||||
set(NPCOMP_PYEXT_LIBADD)
|
||||
endif()
|
||||
|
||||
set(_addl_extension_sources)
|
||||
if(NPCOMP_ENABLE_REFJIT)
|
||||
list(APPEND NPCOMP_PYEXT_LIBADD
|
||||
NPCOMPBackendRefJITPythonModule
|
||||
)
|
||||
list(APPEND _addl_extension_sources "${CMAKE_CURRENT_SOURCE_DIR}/RefJITBackend.cpp")
|
||||
endif()
|
||||
|
||||
add_library(NPCOMPNativePyExt ${NPCOMP_PYEXT_LINK_MODE}
|
||||
NpcompModule.cpp
|
||||
declare_mlir_python_sources(NPCOMPPythonSources
|
||||
ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
SOURCES
|
||||
npcomp/__init__.py
|
||||
npcomp/decorators.py
|
||||
npcomp/exporter.py
|
||||
npcomp/smoketest.py
|
||||
npcomp/types.py
|
||||
npcomp/dialects/_ods_common.py
|
||||
SOURCES_GLOB
|
||||
npcomp/compiler/*.py
|
||||
npcomp/frontends/*.py
|
||||
npcomp/torch/*.py
|
||||
npcomp/tracing/*.py
|
||||
npcomp/utils/*.py
|
||||
)
|
||||
declare_mlir_python_sources(NPCOMPPythonSources.Dialects
|
||||
ADD_TO_PARENT NPCOMPPythonSources
|
||||
)
|
||||
declare_mlir_python_sources(NPCOMPPythonExtensions)
|
||||
|
||||
declare_mlir_python_extension(NPCOMPPythonExtensions.Core
|
||||
MODULE_NAME _npcomp
|
||||
ADD_TO_PARENT NPCOMPPythonExtensions
|
||||
SOURCES
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/NpcompModule.cpp
|
||||
${_addl_extension_sources}
|
||||
EMBED_CAPI_LINK_LIBS
|
||||
NPCOMPCAPI
|
||||
PRIVATE_LINK_LIBS
|
||||
LLVMSupport
|
||||
)
|
||||
|
||||
set_target_properties(NPCOMPNativePyExt PROPERTIES LIBRARY_OUTPUT_DIRECTORY
|
||||
"${CMAKE_CURRENT_BINARY_DIR}")
|
||||
set_target_properties(NPCOMPNativePyExt PROPERTIES OUTPUT_NAME _npcomp)
|
||||
set_target_properties(NPCOMPNativePyExt PROPERTIES PREFIX
|
||||
"${PYTHON_MODULE_PREFIX}")
|
||||
set_target_properties(NPCOMPNativePyExt PROPERTIES SUFFIX
|
||||
"${PYTHON_MODULE_EXTENSION}")
|
||||
################################################################################
|
||||
# Declare dialects
|
||||
################################################################################
|
||||
|
||||
# pybind requires binding code to be compiled with -fvisibility=hidden
|
||||
# Better code can be generated if the entire project compiles that way, but
|
||||
# that is not enforced here.
|
||||
set_target_properties(NPCOMPNativePyExt PROPERTIES CXX_VISIBILITY_PRESET "hidden")
|
||||
declare_mlir_dialect_python_bindings(
|
||||
ADD_TO_PARENT NPCOMPPythonSources.Dialects
|
||||
ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
TD_FILE npcomp/dialects/BasicpyBind.td
|
||||
SOURCES npcomp/dialects/basicpy.py
|
||||
DIALECT_NAME basicpy)
|
||||
|
||||
get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
|
||||
get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
|
||||
declare_mlir_dialect_python_bindings(
|
||||
ADD_TO_PARENT NPCOMPPythonSources.Dialects
|
||||
ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
TD_FILE npcomp/dialects/NumpyBind.td
|
||||
SOURCES npcomp/dialects/numpy.py
|
||||
DIALECT_NAME numpy)
|
||||
|
||||
target_link_libraries(NPCOMPNativePyExt
|
||||
PRIVATE
|
||||
# Transitive dep on the shared library links most things from there.
|
||||
NPCOMP
|
||||
declare_mlir_dialect_python_bindings(
|
||||
ADD_TO_PARENT NPCOMPPythonSources.Dialects
|
||||
ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
TD_FILE npcomp/dialects/TCFBind.td
|
||||
SOURCES npcomp/dialects/tcf.py
|
||||
DIALECT_NAME tcf)
|
||||
|
||||
NPCOMPInitAll
|
||||
${NPCOMP_PYEXT_LIBADD}
|
||||
declare_mlir_dialect_python_bindings(
|
||||
ADD_TO_PARENT NPCOMPPythonSources.Dialects
|
||||
ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
TD_FILE npcomp/dialects/TorchBind.td
|
||||
SOURCES npcomp/dialects/torch.py
|
||||
DIALECT_NAME torch)
|
||||
|
||||
################################################################################
|
||||
# Build composite binaries
|
||||
################################################################################
|
||||
|
||||
# Bundle our own, self-contained CAPI library with all of our deps.
|
||||
add_mlir_python_common_capi_library(NPCOMPPythonCAPI
|
||||
INSTALL_COMPONENT NPCOMPPythonModules
|
||||
INSTALL_DESTINATION python_packages/npcomp_core/mlir/_mlir_libs
|
||||
# NOTE: When the MLIR API is relocated under npcomp, this would change to
|
||||
# .../npcomp/_mlir_libs
|
||||
OUTPUT_DIRECTORY "${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_core/mlir/_mlir_libs"
|
||||
RELATIVE_INSTALL_ROOT "../../../.."
|
||||
DECLARED_SOURCES
|
||||
# TODO: This can be chopped down significantly for size.
|
||||
MLIRPythonSources
|
||||
MLIRPythonExtension.AllPassesRegistration
|
||||
NPCOMPPythonSources
|
||||
NPCOMPPythonExtensions
|
||||
)
|
||||
npcomp_python_target_compile_options(NPCOMPNativePyExt)
|
||||
|
||||
mlir_check_all_link_libraries(NPCOMPNativePyExt)
|
||||
# Bundle the MLIR python sources into our package.
|
||||
# The MLIR API is position independent, so we explicitly output it to the mlir/
|
||||
# folder as a temporary measure. It will eventually migrate under the npcomp/
|
||||
# folder and be accessible under the unified "import npcomp..." namespace.
|
||||
add_mlir_python_modules(NPCOMPMLIRPythonModules
|
||||
ROOT_PREFIX "${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_core/mlir"
|
||||
INSTALL_PREFIX "python_packages/npcomp_core/mlir"
|
||||
DECLARED_SOURCES
|
||||
MLIRPythonSources
|
||||
MLIRPythonExtension.AllPassesRegistration
|
||||
# We need the npcomp extensions co-located with the MLIR extensions. When
|
||||
# the namespace is unified, this moves to the below.
|
||||
NPCOMPPythonExtensions
|
||||
COMMON_CAPI_LINK_LIBS
|
||||
NPCOMPPythonCAPI
|
||||
)
|
||||
|
||||
# Bundle the NPCOMP python sources into our package.
|
||||
add_mlir_python_modules(NPCOMPPythonModules
|
||||
ROOT_PREFIX "${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_core"
|
||||
INSTALL_PREFIX "python_packages/npcomp_core"
|
||||
DECLARED_SOURCES
|
||||
NPCOMPPythonSources
|
||||
COMMON_CAPI_LINK_LIBS
|
||||
NPCOMPPythonCAPI
|
||||
)
|
||||
|
||||
# Order dependent: Built artifacts add dependencies to the above targets.
|
||||
add_subdirectory(npcomp/dialects)
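
The net effect of the rework above is that the MLIR sources, the MLIR extensions, the npcomp sources, the _npcomp extension, and the common CAPI library are all assembled under ${MLIR_NPCOMP_PYTHON_PACKAGES_DIR}/npcomp_core. A hedged sketch of consuming that tree directly (the concrete build path is a placeholder):

    # Hedged sketch, not part of this commit.
    import sys
    sys.path.insert(0, "/path/to/build/python_packages/npcomp_core")  # placeholder path

    import mlir.ir  # the MLIR API bundled under npcomp_core/mlir for now
    import npcomp   # the npcomp sources plus the statically assembled _npcomp extension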
@ -9,6 +9,9 @@
|
|||
#include <cstddef>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "./NpcompModule.h"
|
||||
#include "./NpcompPybindUtils.h"
|
||||
|
||||
#include "mlir-c/BuiltinAttributes.h"
|
||||
#include "mlir-c/BuiltinTypes.h"
|
||||
#include "mlir-c/Diagnostics.h"
|
||||
|
@ -16,11 +19,6 @@
|
|||
#include "npcomp-c/InitLLVM.h"
|
||||
#include "npcomp-c/NumpyTypes.h"
|
||||
#include "npcomp-c/Registration.h"
|
||||
#include "npcomp/Python/PybindUtils.h"
|
||||
|
||||
#ifdef NPCOMP_ENABLE_REFJIT
|
||||
#include "npcomp/Backend/RefJIT/PythonModule.h"
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//===- PythonModule.h - IREE python bindings ------------------------------===//
|
||||
//===- NpcompModule.h - Headers for the python module ---------------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
|
@ -6,10 +6,10 @@
|
|||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef NPCOMP_BACKEND_REFJIT_PYTHON_MODULE_H
|
||||
#define NPCOMP_BACKEND_REFJIT_PYTHON_MODULE_H
|
||||
#ifndef NPCOMP_PYTHON_NPCOMP_MODULE_H
|
||||
#define NPCOMP_PYTHON_NPCOMP_MODULE_H
|
||||
|
||||
#include "npcomp/Python/PybindUtils.h"
|
||||
#include "./NpcompPybindUtils.h"
|
||||
|
||||
namespace npcomp {
|
||||
namespace python {
|
||||
|
@ -20,4 +20,4 @@ void defineBackendRefJitModule(py::module &m);
|
|||
} // namespace python
|
||||
} // namespace npcomp
|
||||
|
||||
#endif // NPCOMP_BACKEND_REFJIT_PYTHON_MODULE_H
|
||||
#endif // NPCOMP_PYTHON_NPCOMP_MODULE_H
|
|
@ -1,4 +1,4 @@
|
|||
//===- PybindUtils.h - Utilities for interop with python ------------------===//
|
||||
//===- NpcompPybindUtils.h - Utilities for interop with python ------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
|
@ -6,8 +6,10 @@
|
|||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef NPCOMP_PYTHON_PYBIND_UTILS_H
|
||||
#define NPCOMP_PYTHON_PYBIND_UTILS_H
|
||||
// TODO: Most of this lives upstream now and should be taken from there.
|
||||
|
||||
#ifndef NPCOMP_PYTHON_NPCOMP_PYBIND_UTILS_H
|
||||
#define NPCOMP_PYTHON_NPCOMP_PYBIND_UTILS_H
|
||||
|
||||
#include <string>
|
||||
|
||||
|
@ -192,4 +194,4 @@ inline pybind11::error_already_set raiseValueError(const std::string &message) {
|
|||
|
||||
} // namespace pybind11
|
||||
|
||||
#endif // NPCOMP_PYTHON_PYBIND_UTILS_H
|
||||
#endif // NPCOMP_PYTHON_NPCOMP_PYBIND_UTILS_H
|
|
@ -0,0 +1,164 @@
|
|||
//===- PythonModule.cpp - RefJIT python bindings --------------------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "./NpcompModule.h"
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
#include "pybind11/numpy.h"
|
||||
|
||||
#include "npcomp-c/RefJITBackend.h"
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
|
||||
using llvm::SmallVector;
|
||||
using llvm::StringRef;
|
||||
using llvm::Twine;
|
||||
|
||||
static NpcompRefJitElementType
|
||||
mapBufferFormatToElementType(const std::string &format, py::ssize_t itemSize) {
|
||||
if (format == "f")
|
||||
return NPCOMP_REFJIT_F32;
|
||||
|
||||
std::string message("unsupported buffer format: ");
|
||||
message.append(format);
|
||||
throw py::raiseValueError(message);
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
struct PyRefJitModule {
|
||||
PyRefJitModule(NpcompRefJitModule instance) : instance(instance) {}
|
||||
~PyRefJitModule() {
|
||||
if (instance.ptr)
|
||||
npcompRefJitModuleDestroy(instance);
|
||||
}
|
||||
PyRefJitModule(const PyRefJitModule &) = delete;
|
||||
void operator=(const PyRefJitModule &) = delete;
|
||||
PyRefJitModule(PyRefJitModule &&other) : instance(other.instance) {
|
||||
other.instance.ptr = nullptr;
|
||||
}
|
||||
|
||||
NpcompRefJitModule instance = {nullptr};
|
||||
};
|
||||
|
||||
struct PyRefJitValueList {
|
||||
PyRefJitValueList(NpcompRefJitValueList instance) : instance(instance) {}
|
||||
~PyRefJitValueList() {
|
||||
if (instance.ptr)
|
||||
npcompRefJitValueListDestroy(instance);
|
||||
}
|
||||
PyRefJitValueList(const PyRefJitValueList &) = delete;
|
||||
void operator=(const PyRefJitValueList &) = delete;
|
||||
PyRefJitValueList(PyRefJitValueList &&other) : instance(other.instance) {
|
||||
other.instance.ptr = nullptr;
|
||||
}
|
||||
|
||||
NpcompRefJitValueList instance = {nullptr};
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
void npcomp::python::defineBackendRefJitModule(py::module &m) {
|
||||
m.def("build_backend_compilation_pipeline", [](MlirPassManager capiPm) {
|
||||
npcompRefJitBuildBackendCompilationPipeline(capiPm, /*optimize=*/true);
|
||||
});
|
||||
py::class_<PyRefJitValueList>(m, "ValueList");
|
||||
py::class_<PyRefJitModule>(m, "JITModule")
|
||||
.def_static(
|
||||
"from_compiled_module",
|
||||
[](MlirModule capiModule,
|
||||
std::vector<std::string> pySharedLibs) -> PyRefJitModule {
|
||||
SmallVector<MlirStringRef, 4> sharedLibs;
|
||||
for (auto &s : pySharedLibs)
|
||||
sharedLibs.push_back(MlirStringRef{s.data(), s.size()});
|
||||
char *errorMessageCstr;
|
||||
NpcompRefJitModule m =
|
||||
npcompRefJitModuleCreate(capiModule, &sharedLibs[0],
|
||||
sharedLibs.size(), &errorMessageCstr);
|
||||
if (npcompRefJitModuleIsNull(m)) {
|
||||
std::string errorMessage(errorMessageCstr);
|
||||
std::free(errorMessageCstr);
|
||||
throw py::raisePyError(PyExc_RuntimeError, errorMessage.c_str());
|
||||
}
|
||||
return PyRefJitModule(m);
|
||||
},
|
||||
py::arg("module"), py::arg("shared_libs"))
|
||||
.def(
|
||||
"invoke",
|
||||
[](PyRefJitModule &self, std::string functionName,
|
||||
std::vector<py::buffer> inputs) {
|
||||
py::object ioListObject =
|
||||
py::cast(PyRefJitValueList(npcompRefJitValueListCreate()));
|
||||
PyRefJitValueList &ioList =
|
||||
py::cast<PyRefJitValueList &>(ioListObject);
|
||||
|
||||
// Prepare inputs.
|
||||
for (auto &buffer : inputs) {
|
||||
// Request a C contiguous view as that is what Tensor accepts now
|
||||
// (no strides or non row-major layout).
|
||||
int flags = PyBUF_C_CONTIGUOUS | PyBUF_FORMAT;
|
||||
std::unique_ptr<Py_buffer> view(new Py_buffer());
|
||||
if (PyObject_GetBuffer(buffer.ptr(), view.get(), flags) != 0) {
|
||||
throw py::error_already_set();
|
||||
}
|
||||
py::buffer_info info(view.release());
|
||||
auto elementType =
|
||||
mapBufferFormatToElementType(info.format, info.itemsize);
|
||||
SmallVector<int32_t, 4> extents(info.shape.begin(),
|
||||
info.shape.end());
|
||||
|
||||
npcompRefJitValueAddTensorCopy(ioList.instance, elementType,
|
||||
extents.data(), extents.size(),
|
||||
info.ptr);
|
||||
}
|
||||
|
||||
// Invoke.
|
||||
char *errorMessageCstr;
|
||||
if (!npcompRefJitModuleInvoke(
|
||||
self.instance,
|
||||
MlirStringRef{functionName.data(), functionName.size()},
|
||||
ioList.instance, &errorMessageCstr)) {
|
||||
std::string errorMessage(errorMessageCstr);
|
||||
std::free(errorMessageCstr);
|
||||
throw py::raisePyError(PyExc_RuntimeError, errorMessage.c_str());
|
||||
}
|
||||
|
||||
// Prepare outputs.
|
||||
std::vector<py::object> outputs;
|
||||
for (intptr_t i = 0; i < npcompRefJitValueListSize(ioList.instance);
|
||||
++i) {
|
||||
if (npcompRefJitValueIsaTensor(ioList.instance, i)) {
|
||||
NpcompRefJitElementType elementType;
|
||||
intptr_t rank;
|
||||
const int32_t *extents;
|
||||
void *data = npcompRefJitValueGetTensor(
|
||||
ioList.instance, i, &elementType, &rank, &extents);
|
||||
|
||||
const char *format;
|
||||
switch (elementType) {
|
||||
case NPCOMP_REFJIT_F32:
|
||||
format = "f";
|
||||
break;
|
||||
default:
|
||||
throw py::raiseValueError("unsupported tensor element type");
|
||||
}
|
||||
|
||||
outputs.push_back(
|
||||
py::array(py::dtype(format),
|
||||
llvm::ArrayRef<std::int32_t>(extents, rank), data,
|
||||
/*base=*/ioListObject));
|
||||
} else {
|
||||
throw py::raisePyError(PyExc_ValueError,
|
||||
"unsupported npcomp refjit return type");
|
||||
}
|
||||
}
|
||||
return outputs;
|
||||
},
|
||||
py::arg("function_name"), py::arg("inputs"));
|
||||
}
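
For orientation, a hedged sketch of calling the binding above from Python. The import path mirrors how refjit.py resolves the backend, `compiled_module` stands for an mlir.ir.Module already lowered through the refjit backend pipeline, and the function name and shared library path are placeholders:

    # Hedged sketch, not part of this commit.
    import numpy as np
    from _npcomp.backend import refjit  # assumed resolution path

    jit = refjit.JITModule.from_compiled_module(
        compiled_module,                   # placeholder: an already-lowered module
        shared_libs=[runtime_shlib_path])  # placeholder: e.g. libNPCOMPCompilerRuntimeShlib
    outputs = jit.invoke("forward", [np.ones((2, 3), dtype=np.float32)])
    # Inputs must be C-contiguous float32 buffers ("f" format); each output comes
    # back as a numpy array whose base object keeps the refjit value list alive.
    print(outputs[0])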
@ -1,22 +1,7 @@
|
|||
def _load_extension():
|
||||
# TODO: Remote the RTLD_GLOBAL hack once local, cross module imports
|
||||
# resolve symbols properly. Something is keeping the dynamic loader on
|
||||
# Linux from treating the following vague symbols as the same across
|
||||
# _mlir and _npcomp:
|
||||
# mlir::detail::TypeIDExported::get<mlir::FuncOp>()::instance
|
||||
import sys
|
||||
import ctypes
|
||||
flags = sys.getdlopenflags()
|
||||
sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL)
|
||||
import _npcomp
|
||||
sys.setdlopenflags(flags)
|
||||
from mlir import _cext_loader
|
||||
_cext_loader._cext.globals.append_dialect_search_prefix("npcomp.dialects")
|
||||
|
||||
from mlir._cext_loader import _cext
|
||||
_cext.globals.append_dialect_search_prefix("npcomp.dialects")
|
||||
return _npcomp
|
||||
|
||||
|
||||
_cext = _load_extension()
_cext = _cext_loader._load_extension("_npcomp")
_cext._register_all_passes()
_cext._initialize_llvm_codegen()
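
With the RTLD_GLOBAL loader hack gone, a downstream user only needs the plain import; a minimal hedged sketch of what that now implies:

    # Hedged sketch, not part of this commit.
    import npcomp          # loads _npcomp via mlir._cext_loader, registers the npcomp
                           # passes, and initializes LLVM codegen
    from mlir import ir

    with ir.Context():
        print("npcomp extension loaded:", npcomp._cext is not None)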
@ -18,9 +18,10 @@ def get_refjit():
|
|||
  global _refjit
  if _refjit is not None:
    return _refjit
  from .... import _cext
  try:
    from _npcomp.backend import refjit as imported_refjit
  except ImportError:
    imported_refjit = _cext.backend.refjit
  except AttributeError:
    raise ImportError(
        "The npcomp native module was not compiled with refjit support")
  _refjit = imported_refjit
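
A hedged sketch of the caller side (the module path is an assumption based on the compiler/backend package layout used elsewhere in this commit):

    # Hedged sketch, not part of this commit.
    from npcomp.compiler.generic.backend import refjit as refjit_loader  # assumed path

    refjit = refjit_loader.get_refjit()  # raises ImportError when built without refjit
    # refjit then exposes the JITModule binding and build_backend_compilation_pipeline.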
@ -1,11 +0,0 @@
|
|||
function(_add_dialect target td_file bind_name)
|
||||
set(LLVM_TARGET_DEFINITIONS ${td_file})
|
||||
mlir_tablegen("${bind_name}.py" -gen-python-op-bindings -bind-dialect=${bind_name})
|
||||
add_public_tablegen_target(${target})
|
||||
add_dependencies(NPCOMPNativePyExt ${target})
|
||||
endfunction()
|
||||
|
||||
_add_dialect(NPCOMPPyDialectBasicpy BasicpyBind.td "basicpy")
|
||||
_add_dialect(NPCOMPPyDialectNumpy NumpyBind.td "numpy")
|
||||
_add_dialect(NPCOMPPyDialectTCF TCFBind.td "tcf")
|
||||
_add_dialect(NPCOMPPyDialectTorch TorchBind.td "torch")
|
|
@ -0,0 +1,5 @@
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

from ._basicpy_ops_gen import *

@ -0,0 +1,5 @@
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

from ._numpy_ops_gen import *

@ -0,0 +1,5 @@
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

from ._tcf_ops_gen import *

@ -0,0 +1,5 @@
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

from ._torch_ops_gen import *
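
These thin __init__ files make the tablegen-generated op bindings importable as ordinary Python modules; a hedged sketch:

    # Hedged sketch, not part of this commit.
    import npcomp  # appends the "npcomp.dialects" search prefix for generated ops
    from npcomp.dialects import basicpy, tcf
    from npcomp.dialects import numpy as numpy_d, torch as torch_d

    # Each module re-exports the generated op classes from its _*_ops_gen
    # companion; the concrete class names are defined by the *Bind.td files.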
@ -3,4 +3,5 @@ llvm_update_compile_flags(npcomp-capi-ir-test)
|
|||
|
||||
target_link_libraries(npcomp-capi-ir-test
|
||||
PRIVATE
|
||||
NPCOMP)
|
||||
NPCOMPCAPI
|
||||
)
|
||||
|
|
|
@ -16,7 +16,8 @@ set(NPCOMP_TEST_DEPENDS
|
|||
npcomp-capi-ir-test
|
||||
npcomp-opt
|
||||
npcomp-run-mlir
|
||||
NPCOMPNativePyExt
|
||||
NPCOMPPythonModules
|
||||
NPCOMPMLIRPythonModules
|
||||
)
|
||||
|
||||
add_lit_testsuite(check-npcomp-lit "Running the npcomp regression tests"
|
||||
|
|
|
@ -6,8 +6,7 @@ import sys

print(f"PYTHONPATH={sys.path}")

import mlir
import mlir.ir
import npcomp
import _npcomp

print("Extensions all loaded")
@ -57,8 +57,7 @@ config.npcomp_runtime_shlib = os.path.join(
|
|||
npcomp_python_dir = "python" if config.npcomp_built_standalone else "tools/npcomp/python"
|
||||
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
|
||||
llvm_config.with_environment('PYTHONPATH', [
|
||||
os.path.join(config.llvm_obj_root, "python"),
|
||||
os.path.join(config.npcomp_obj_root, npcomp_python_dir)],
|
||||
os.path.join(config.npcomp_python_packages_dir, 'npcomp_core')],
|
||||
append_path=True)
|
||||
|
||||
tool_dirs = [
|
||||
|
|
|
@ -6,13 +6,15 @@ config.host_triple = "@LLVM_HOST_TRIPLE@"
|
|||
config.target_triple = "@TARGET_TRIPLE@"
|
||||
config.llvm_src_root = "@LLVM_SOURCE_DIR@"
|
||||
config.llvm_obj_root = "@LLVM_BINARY_DIR@"
|
||||
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
|
||||
# TODO: Fix tools dir to find FileCheck.
|
||||
#config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
|
||||
config.llvm_tools_dir = "@LLVM_BINARY_DIR@/bin"
|
||||
config.llvm_lib_dir = "@LLVM_LIBRARY_DIR@"
|
||||
config.llvm_shlib_dir = "@SHLIBDIR@"
|
||||
config.llvm_shlib_ext = "@SHLIBEXT@"
|
||||
config.llvm_exe_ext = "@EXEEXT@"
|
||||
config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
|
||||
config.python_executable = "@PYTHON_EXECUTABLE@"
|
||||
config.python_executable = "@Python3_EXECUTABLE@"
|
||||
config.gold_executable = "@GOLD_EXECUTABLE@"
|
||||
config.ld64_executable = "@LD64_EXECUTABLE@"
|
||||
config.enable_shared = @ENABLE_SHARED@
|
||||
|
@ -31,6 +33,7 @@ config.host_arch = "@HOST_ARCH@"
|
|||
config.npcomp_src_root = "@CMAKE_SOURCE_DIR@"
|
||||
config.npcomp_obj_root = "@CMAKE_BINARY_DIR@"
|
||||
config.npcomp_built_standalone = bool("@NPCOMP_BUILT_STANDALONE@")
|
||||
config.npcomp_python_packages_dir = "@MLIR_NPCOMP_PYTHON_PACKAGES_DIR@"
|
||||
|
||||
# Optional features.
|
||||
config.npcomp_enable_iree = @NPCOMP_ENABLE_IREE@
|
||||
|
|
|
@ -1,3 +1,2 @@
|
|||
add_subdirectory(npcomp-opt)
|
||||
add_subdirectory(npcomp-run-mlir)
|
||||
add_subdirectory(npcomp-shlib)
|
||||
|
|
|
@ -2,10 +2,16 @@
|
|||
# binaries with the python packages for hacking/debugging.
|
||||
add_npcomp_executable(npcomp-opt npcomp-opt.cpp)
|
||||
|
||||
get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
|
||||
get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
|
||||
|
||||
target_link_libraries(npcomp-opt PRIVATE
|
||||
# Shared library deps first ensure we get most of what we need from libraries.
|
||||
NPCOMP
|
||||
MLIR
|
||||
MLIROptLib
|
||||
NPCOMPInitAll
|
||||
|
||||
# TODO: Remove these in favor of interface deps.
|
||||
${dialect_libs}
|
||||
${conversion_libs}
|
||||
)
|
||||
|
||||
mlir_check_all_link_libraries(npcomp-opt)
|
||||
|
|
|
@ -10,11 +10,8 @@ add_npcomp_executable(npcomp-run-mlir
|
|||
|
||||
llvm_update_compile_flags(npcomp-run-mlir)
|
||||
target_link_libraries(npcomp-run-mlir PRIVATE
|
||||
# Shared library deps first ensure we get most of what we need from libraries.
|
||||
NPCOMP
|
||||
MLIR
|
||||
|
||||
NPCOMPCAPI
|
||||
NPCOMPInitAll
|
||||
MLIRAnalysis
|
||||
MLIRIR
|
||||
MLIRJitRunner
|
||||
|
@ -22,6 +19,8 @@ target_link_libraries(npcomp-run-mlir PRIVATE
|
|||
MLIRSupport
|
||||
NPCOMPInitAll
|
||||
NPCOMPRefBackendJITHelpers
|
||||
|
||||
# TODO: Remove these in favor of interface deps.
|
||||
${conversion_libs}
|
||||
${dialect_libs}
|
||||
)
|
||||
|
|
|
@ -1,49 +0,0 @@
|
|||
# Building libmlir-cpp.so fails if LLVM_ENABLE_PIC=Off
|
||||
if (NOT LLVM_ENABLE_PIC)
|
||||
message(WARNING "Not building NPCOMP dylib because PIC is disabled")
|
||||
return()
|
||||
endif()
|
||||
|
||||
# Building libmlir-cpp.so may not work on MSVC
|
||||
if (MSVC)
|
||||
message(WARNING "Not building NPCOMP dylib because not yet supported on MSVC")
|
||||
return()
|
||||
endif()
|
||||
|
||||
if(NOT NPCOMP_BUILD_NPCOMP_DYLIB)
|
||||
message(WARNING "Not building NPCOMP dylib (not NPCOMP_BUILD_NPCOMP_DYLIB): Fully static builds not yet supported")
|
||||
return()
|
||||
endif()
|
||||
|
||||
get_property(npcomp_libs GLOBAL PROPERTY NPCOMP_STATIC_LIBS)
|
||||
list(REMOVE_DUPLICATES npcomp_libs)
|
||||
|
||||
# Populate _OBJECTS and _DEPS as necessary per platform.
|
||||
foreach (lib ${npcomp_libs})
|
||||
if(XCODE)
|
||||
# Xcode doesn't support object libraries, so we have to trick it into
|
||||
# linking the static libraries instead.
|
||||
list(APPEND _DEPS "-force_load" ${lib})
|
||||
else()
|
||||
list(APPEND _OBJECTS $<TARGET_OBJECTS:obj.${lib}>)
|
||||
endif()
|
||||
# Add transitive deps explcitly since otherwise, there would just be
|
||||
# objects.
|
||||
list(APPEND _DEPS $<TARGET_PROPERTY:${lib},LINK_LIBRARIES>)
|
||||
endforeach()
|
||||
|
||||
# Note: Does not use add_npcomp_library, which is used for things that go
|
||||
# *into* the libNPCOMP.so. This is building the shared library, so use
|
||||
# a higher-level rule.
|
||||
llvm_add_library(
|
||||
NPCOMP
|
||||
SHARED
|
||||
npcomp-shlib.cpp
|
||||
${_OBJECTS}
|
||||
LINK_LIBS PUBLIC
|
||||
# Public dependencies on the MLIR public API and impl shared libraries.
|
||||
MLIRPythonCAPI
|
||||
MLIR
|
||||
${_DEPS}
|
||||
)
|
||||
target_link_libraries(NPCOMP PRIVATE ${LLVM_PTHREAD_LIB})
|
|
@ -1 +0,0 @@
|
|||
// Intentionally empty source file to make CMake happy
|