# torch-mlir/CMakeLists.txt

cmake_minimum_required(VERSION 3.13.4)
# CMP0116: Ninja generators transform `DEPFILE`s from `add_custom_command()`
# New in CMake 3.20. https://cmake.org/cmake/help/latest/policy/CMP0116.html
if(POLICY CMP0116)
  cmake_policy(SET CMP0116 OLD)
endif()
if(POLICY CMP0068)
  cmake_policy(SET CMP0068 NEW)
  set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
endif()
if(POLICY CMP0075)
  cmake_policy(SET CMP0075 NEW)
endif()
if(POLICY CMP0077)
  cmake_policy(SET CMP0077 NEW)
endif()
#-------------------------------------------------------------------------------
# Options and settings
#-------------------------------------------------------------------------------
set(NPCOMP_MINIMUM_PYTHON_VERSION 3.6)
option(NPCOMP_ENABLE_IREE "Enables the IREE backend (location configured via NPCOMP_IREE_BUILDDIR or IREE_DIR)." OFF)
option(NPCOMP_ENABLE_REFJIT "Enables the reference JIT backend." ON)
set(NPCOMP_IREE_BUILDDIR "../iree-build" CACHE STRING "If building against IREE, setting this selects an IREE build directory to depend on (versus an installed package)")
option(NPCOMP_ENABLE_PYTORCH "Enables PyTorch integration." OFF)
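
# Example (illustrative; the generator and directory names are placeholders):
# the options above are selected on the configure command line, e.g.
#   cmake -G Ninja -B build -DNPCOMP_ENABLE_REFJIT=ON -DNPCOMP_ENABLE_IREE=OFF
# IREE- and PyTorch-specific examples appear near their sections below.
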
# Turn on -gsplit-dwarf if requested in debug builds.
if (NPCOMP_USE_SPLIT_DWARF AND
    ((CMAKE_BUILD_TYPE STREQUAL "Debug") OR
     (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")))
  # Limit to clang and gcc so far. Add compilers supporting this option.
  if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR
      CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
    add_compile_options(-gsplit-dwarf)
  endif()
endif()
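
# Example (illustrative): NPCOMP_USE_SPLIT_DWARF is not declared as an
# option() here, so it is set on the configure command line and only takes
# effect for the debug-info build types checked above, e.g.
#   cmake -B build -DCMAKE_BUILD_TYPE=RelWithDebInfo -DNPCOMP_USE_SPLIT_DWARF=ON
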
#-------------------------------------------------------------------------------
# MSVC defaults
#-------------------------------------------------------------------------------
if(MSVC)
  add_compile_options(
    $<$<CONFIG:>:/MD>
    $<$<CONFIG:Debug>:/MD>
    $<$<CONFIG:Release>:/MD>
  )
endif()
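
# Note: the generator expressions above force the release dynamic CRT (/MD)
# for every configuration, including Debug. An alternative not used here
# (CMake 3.15+, policy CMP0091) would be the abstracted setting below:
#   set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreadedDLL")
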
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
# If we are not building as a part of LLVM, build NPCOMP as a standalone
# project, including LLVM as a subdirectory. This gives us the most control
# and is used for standalone releases.
#-------------------------------------------------------------------------------
# Project setup and globals
#-------------------------------------------------------------------------------
project(npcomp LANGUAGES CXX C)
set(CMAKE_C_STANDARD 11)
set(CMAKE_CXX_STANDARD 14)
#-------------------------------------------------------------------------------
# Default and required options.
#-------------------------------------------------------------------------------
# CMake library generation settings.
set(BUILD_SHARED_LIBS OFF CACHE BOOL "We are actually building a static mondo-lib")
set(CMAKE_PLATFORM_NO_VERSIONED_SONAME ON CACHE BOOL "Python soname linked libraries are bad")
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON CACHE BOOL "Hide inlines")
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
# Improved LLVM defaults for a standalone build.
set(LLVM_ENABLE_PROJECTS mlir CACHE STRING "LLVM projects")
set(LLVM_ENABLE_Z3_SOLVER OFF CACHE BOOL "Disable Z3")
set(LLVM_ENABLE_ZLIB OFF CACHE BOOL "Disable ZLIB")
set(LLVM_TARGETS_TO_BUILD "host" CACHE STRING "Only build for the host")
set(LLVM_INCLUDE_EXAMPLES OFF CACHE BOOL "Disable examples")
# TODO: MLIR is a "tool"
set(LLVM_INCLUDE_TOOLS ON CACHE BOOL "Include tools")
set(LLVM_INCLUDE_TESTS ON CACHE BOOL "Include tests")
set(MLIR_BINDINGS_PYTHON_LOCK_VERSION ON CACHE BOOL "Link against libpython for development (should be disabled for production)")
# Required LLVM settings.
set(MLIR_ENABLE_BINDINGS_PYTHON ON CACHE BOOL "Enable MLIR python bindings" FORCE)
#-------------------------------------------------------------------------------
# MLIR/LLVM Build Setup
# TODO: It would be nice to have a better setup than this for including MLIR
# as a sub-project.
#-------------------------------------------------------------------------------
if(NOT LLVM_MAIN_SRC_DIR)
  set(LLVM_MAIN_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/llvm-project/llvm")
endif()
if(NOT MLIR_MAIN_SRC_DIR)
  set(MLIR_MAIN_SRC_DIR "${LLVM_MAIN_SRC_DIR}/../mlir")
endif()
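
# Example (illustrative; path is a placeholder): both source directories
# default to the in-tree submodule under external/, but can be pointed at
# another llvm-project checkout at configure time, e.g.
#   cmake -B build -DLLVM_MAIN_SRC_DIR=/path/to/llvm-project/llvm
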
set(LLVM_MAIN_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/llvm")
set(MLIR_MAIN_BINARY_DIR "${LLVM_MAIN_BINARY_DIR}/tools/mlir")
set(LLVM_INCLUDE_DIR "${LLVM_MAIN_SRC_DIR}/include")
set(LLVM_GENERATED_INCLUDE_DIR "${LLVM_MAIN_BINARY_DIR}/include")
set(MLIR_INCLUDE_DIR "${MLIR_MAIN_SRC_DIR}/include")
set(MLIR_GENERATED_INCLUDE_DIR "${MLIR_MAIN_BINARY_DIR}/include")
set(MLIR_TABLEGEN_EXE "$<TARGET_FILE:mlir-tblgen>")
include_directories(SYSTEM "${LLVM_INCLUDE_DIR}")
include_directories(SYSTEM "${LLVM_GENERATED_INCLUDE_DIR}")
include_directories(SYSTEM "${MLIR_INCLUDE_DIR}")
include_directories(SYSTEM "${MLIR_GENERATED_INCLUDE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${MLIR_MAIN_SRC_DIR}/cmake/modules")
list(APPEND CMAKE_MODULE_PATH "${LLVM_MAIN_SRC_DIR}/cmake/modules")
# Pre-configure the Python environment using the MLIR macros so that the
# resulting variables are in scope and the subsequent inclusion of LLVM picks
# up the same Python installation.
include(MLIRDetectPythonEnv)
find_package(Python3 ${NPCOMP_MINIMUM_PYTHON_VERSION}
  COMPONENTS Interpreter Development NumPy REQUIRED)
message(STATUS "Found python include dirs: ${Python3_INCLUDE_DIRS}")
message(STATUS "Found python libraries: ${Python3_LIBRARIES}")
mlir_detect_pybind11_install()
find_package(pybind11 2.6 CONFIG REQUIRED)
message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIR}")
message(STATUS "Python prefix = '${PYTHON_MODULE_PREFIX}', "
"suffix = '${PYTHON_MODULE_SUFFIX}', "
"extension = '${PYTHON_MODULE_EXTENSION}")
# Register the external projects to be built via LLVM_EXTERNAL_PROJECTS.
set(LLVM_EXTERNAL_PROJECTS "iree-dialects;torch-mlir")
set(LLVM_EXTERNAL_IREE_DIALECTS_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/iree-dialects")
set(LLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/torch-mlir")
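
# For reference (illustrative; this file already does the equivalent above):
# LLVM pairs each name listed in LLVM_EXTERNAL_PROJECTS with a matching
# LLVM_EXTERNAL_<NAME>_SOURCE_DIR variable and builds that tree from within
# its own build, so a direct LLVM configure would pass e.g.
#   -DLLVM_EXTERNAL_PROJECTS="iree-dialects;torch-mlir" \
#   -DLLVM_EXTERNAL_IREE_DIALECTS_SOURCE_DIR=/path/to/external/iree-dialects \
#   -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR=/path/to/external/torch-mlir
# This is also why the generated headers appear under llvm/tools/<project>
# in the include paths further below.
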
# LLVM configuration.
message(STATUS "*** ADDING LLVM ***")
add_subdirectory(
"${CMAKE_CURRENT_SOURCE_DIR}/external/llvm-project/llvm"
"${LLVM_MAIN_BINARY_DIR}"
EXCLUDE_FROM_ALL)
message(STATUS "*** LLVM DONE ***")
set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)
set(LLVM_EXTERNAL_LIT "${LLVM_MAIN_BINARY_DIR}/bin/llvm-lit")
set(LLVM_TOOLS_DIR "${LLVM_MAIN_BINARY_DIR}/bin")
# Define the default arguments to use with 'lit', and an option for the user to
# override.
set(LIT_ARGS_DEFAULT "-sv")
if (MSVC_IDE OR XCODE)
  set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
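
# Example (illustrative): LLVM_LIT_ARGS can be overridden at configure time,
# e.g. for verbose, single-process runs when debugging a failing test:
#   cmake -B build -DLLVM_LIT_ARGS="-v -j 1"
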
include(TableGen)
include(AddLLVM)
include(AddMLIR)
include(AddMLIRPython)
include(HandleLLVMOptions)
set(NPCOMP_BUILT_STANDALONE 1)
else()
# TODO: RE-ENABLE EXTERNAL UNIFIED BUILD
message(FATAL_ERROR "External project builds of npcomp are currently not available")
# Otherwise, we are building as a part of LLVM, and we need to set up some
# variables and includes.
# set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root
# set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir
# set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include)
# set(MLIR_TABLEGEN_EXE $<TARGET_FILE:mlir-tblgen>)
# include_directories(SYSTEM ${MLIR_INCLUDE_DIR})
# include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR})
# set(BACKEND_PACKAGE_STRING "${PACKAGE_STRING}")
endif()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
include(AddNPCOMP)
include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${MLIR_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/external/iree-dialects/include)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/llvm/tools/iree-dialects/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/external/torch-mlir/include)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/llvm/tools/torch-mlir/include)
link_directories(${LLVM_BUILD_LIBRARY_DIR})
add_definitions(${LLVM_DEFINITIONS})
set(NPCOMP_TABLEGEN_ARGS "")
#-------------------------------------------------------------------------------
# Optional feature selection
#-------------------------------------------------------------------------------
if(NPCOMP_ENABLE_REFJIT)
  add_compile_definitions(NPCOMP_ENABLE_REFJIT)
  message(STATUS "Reference JIT backend enabled")
endif()
#-------------------------------------------------------------------------------
# IREE configuration
#-------------------------------------------------------------------------------
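# Example (illustrative; paths are placeholders): the IREE backend can be
# enabled against a sibling build directory (the NPCOMP_IREE_BUILDDIR default
# is ../iree-build) or, with that variable cleared, against an installed IREE
# package located via find_package(IREE):
#   cmake -B build -DNPCOMP_ENABLE_IREE=ON -DNPCOMP_IREE_BUILDDIR=/path/to/iree-build
#   cmake -B build -DNPCOMP_ENABLE_IREE=ON -DNPCOMP_IREE_BUILDDIR="" -DIREE_DIR=/path/to/IREE/cmake/config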
if(NPCOMP_ENABLE_IREE)
  add_compile_definitions(NPCOMP_ENABLE_IREE)
  string(APPEND NPCOMP_TABLEGEN_ARGS "-DNPCOMP_ENABLE_IREE")
  if(NPCOMP_IREE_BUILDDIR)
    message(STATUS "Depending on IREE build: ${NPCOMP_IREE_BUILDDIR}")
    set(IREE_DIR "${NPCOMP_IREE_BUILDDIR}")
    set(IREE_FOUND 1)
  else()
    find_package(IREE REQUIRED CONFIG)
  endif()

  # This is temporary until IREE properly supports installation. We splice
  # the various files into where we want them.
  function(symlink_iree src dst)
    set(full_src_path "${IREE_DIR}/${src}")
    get_filename_component(full_src_path "${full_src_path}" ABSOLUTE)
    set(full_dst_path "${CMAKE_CURRENT_BINARY_DIR}/${dst}")
    get_filename_component(dst_dir ${full_dst_path} PATH)
    file(MAKE_DIRECTORY "${dst_dir}")
    execute_process(
      COMMAND
        ${CMAKE_COMMAND} -E create_symlink "${full_src_path}" "${full_dst_path}"
      RESULT_VARIABLE result
    )
    if(NOT ${result} EQUAL 0)
      message(FATAL_ERROR "Could not symlink iree file: ${full_src_path} -> ${full_dst_path}")
    endif()
  endfunction()

  symlink_iree(iree/tools/iree-translate python/npcomp/compiler/generic/backend/iree-translate)
  symlink_iree(bindings/python/pyiree/rt python/pyiree/rt)
endif()
#-------------------------------------------------------------------------------
# Directory setup
#-------------------------------------------------------------------------------
set(MLIR_NPCOMP_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(MLIR_NPCOMP_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(MLIR_NPCOMP_PYTHON_PACKAGES_DIR ${CMAKE_CURRENT_BINARY_DIR}/python_packages)
add_custom_target(check-npcomp)
add_custom_target(check-npcomp-all)
add_dependencies(check-npcomp-all check-npcomp)
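
# Example (illustrative): after configuring, the aggregate test targets can be
# driven through the build tool, e.g.
#   cmake --build build --target check-npcomp
#   cmake --build build --target check-npcomp-all
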
add_subdirectory(include/npcomp)
add_subdirectory(lib)
add_subdirectory(python)
add_subdirectory(test)
add_subdirectory(tools)
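
# Example (illustrative): the PyTorch frontend is off by default and is
# enabled at configure time; it presumably requires a PyTorch installation
# visible to the Python3 interpreter found above (assumption, not verified
# here):
#   cmake -B build -DNPCOMP_ENABLE_PYTORCH=ON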
if(NPCOMP_ENABLE_PYTORCH)
message(STATUS "Adding PyTorch frontent support...")
add_subdirectory(frontends/pytorch)
endif()