# Minimum CMake version; must come first — it also sets the policy baseline
# for everything below.
cmake_minimum_required(VERSION 3.12)

# CMP0068 NEW: on macOS, RPATH settings no longer affect install_name; build
# with the install-time name dir so built binaries are relocatable as installed.
if(POLICY CMP0068)
  cmake_policy(SET CMP0068 NEW)
  set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
endif()

# CMP0075 NEW: check_include_file() and friends honor CMAKE_REQUIRED_LIBRARIES.
if(POLICY CMP0075)
  cmake_policy(SET CMP0075 NEW)
endif()

# CMP0077 NEW: option() honors normal variables already set by a superproject,
# so npcomp can be embedded with its options pre-configured.
if(POLICY CMP0077)
  cmake_policy(SET CMP0077 NEW)
endif()
#-------------------------------------------------------------------------------
# Project setup and globals
#-------------------------------------------------------------------------------

project(npcomp LANGUAGES CXX C)
set(CMAKE_C_STANDARD 11)
set(CMAKE_CXX_STANDARD 14)
# Make the project's own CMake helper modules (AddNPCOMP, ConfigurePyTorch, ...)
# visible to include().
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
2020-06-12 07:40:31 +08:00
|
|
|
#-------------------------------------------------------------------------------
|
|
|
|
# Options and settings
|
|
|
|
#-------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
option(NPCOMP_ENABLE_IREE "Enables the IREE backend (must configure location via IREE_DIR)." OFF)
|
2020-07-11 08:36:32 +08:00
|
|
|
option(NPCOMP_ENABLE_REFJIT "Enables the reference JIT backend." ON)
|
2020-10-09 09:29:59 +08:00
|
|
|
option(NPCOMP_BUILD_NPCOMP_DYLIB "Enables shared build of NPCOMP dylib (depends on LLVM/MLIR dylib support)" ON)
|
2020-06-12 07:40:31 +08:00
|
|
|
set(NPCOMP_IREE_SRCDIR "" CACHE STRING "If building IREE, then setting this elects to build from a source directory (versus installed package)")
|
2020-11-14 08:07:57 +08:00
|
|
|
set(NPCOMP_ENABLE_PYTORCH "OPTIONAL" CACHE STRING "Enables the PyTorch frontend (OFF, OPTIONAL, REQUIRED)")
|
2020-06-12 07:40:31 +08:00
|
|
|
# Turn on -gsplit-dwarf if requested in debug builds.
# NOTE(review): CMAKE_BUILD_TYPE is empty under multi-config generators
# (VS/Xcode), so this intentionally only takes effect for single-config builds.
if (NPCOMP_USE_SPLIT_DWARF AND
    ((CMAKE_BUILD_TYPE STREQUAL "Debug") OR
     (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")))
  # Limit to clang and gcc so far. Add compilers supporting this option.
  # MATCHES "Clang" deliberately covers both Clang and AppleClang IDs.
  if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR
      CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
    add_compile_options(-gsplit-dwarf)
  endif()
endif()
#-------------------------------------------------------------------------------
# MSVC defaults
#-------------------------------------------------------------------------------

if(MSVC)
  # Force the dynamic CRT (/MD) for every configuration so all artifacts link
  # against the same runtime. The empty $<CONFIG:> entry covers builds where
  # no configuration was specified.
  add_compile_options(
    $<$<CONFIG:>:/MD>
    $<$<CONFIG:Debug>:/MD>
    $<$<CONFIG:Release>:/MD>
  )
endif()
#-------------------------------------------------------------------------------
# MLIR/LLVM Configuration
#-------------------------------------------------------------------------------

# MLIRConfig.cmake transitively locates LLVM, so a single find_package
# populates MLIR_DIR, LLVM_DIR, *_INCLUDE_DIRS and the *_CMAKE_DIR paths below.
find_package(MLIR REQUIRED CONFIG)
message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")

set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)

# Define the default arguments to use with 'lit', and an option for the user to
# override.
set(LIT_ARGS_DEFAULT "-sv")
if (MSVC_IDE OR XCODE)
  set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")

# Pull in the LLVM/MLIR build machinery plus this project's own helpers
# (AddNPCOMP, ConfigurePyTorch come from cmake/modules added earlier).
list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
include(TableGen)
include(AddLLVM)
include(AddMLIR)
include(AddNPCOMP)
include(HandleLLVMOptions)
include(ConfigurePyTorch)

# NOTE(review): directory-scoped include/link/definitions follow the LLVM
# subproject convention; modern target-scoped usage would be preferable but
# is a larger refactor.
include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${MLIR_INCLUDE_DIRS})
include_directories(${PROJECT_SOURCE_DIR}/include)
include_directories(${PROJECT_BINARY_DIR}/include)
link_directories(${LLVM_BUILD_LIBRARY_DIR})
add_definitions(${LLVM_DEFINITIONS})

# Extra flags passed to tablegen invocations; extended by feature sections below.
set(NPCOMP_TABLEGEN_ARGS "")
#-------------------------------------------------------------------------------
# Optional feature selection
#-------------------------------------------------------------------------------

if(NPCOMP_ENABLE_REFJIT)
  # Expose the feature flag to all C/C++ translation units.
  add_compile_definitions(NPCOMP_ENABLE_REFJIT)
  message(STATUS "Reference JIT backend enabled")
endif()
#-------------------------------------------------------------------------------
# IREE configuration
#-------------------------------------------------------------------------------

# Adds IREE as an in-tree subproject from NPCOMP_IREE_SRCDIR. Wrapped in a
# function so the warning demotions below stay scoped to the IREE subtree and
# do not leak into npcomp's own targets.
function(npcomp_add_iree_src)
  # TODO: Find a way to fix this upstream as globally demoting warnings
  # like this is a really bad thing to be doing. Also, Abseil seems to try
  # really hard to keep us from touching these so some still get through.
  add_compile_options(-Wno-sign-compare -Wno-unused-template)
  add_subdirectory("${NPCOMP_IREE_SRCDIR}" "iree" EXCLUDE_FROM_ALL)
endfunction()

if(NPCOMP_ENABLE_IREE)
  add_compile_definitions(NPCOMP_ENABLE_IREE)
  string(APPEND NPCOMP_TABLEGEN_ARGS "-DNPCOMP_ENABLE_IREE")
  if(NPCOMP_IREE_SRCDIR)
    # Source build: override IREE's own settings to a minimal configuration
    # that reuses npcomp's MLIR.
    message(STATUS "Depending on IREE source: ${NPCOMP_IREE_SRCDIR}")
    set(IREE_BUILD_TESTS OFF CACHE BOOL "Override IREE setting" FORCE)
    set(IREE_BUILD_SAMPLES OFF CACHE BOOL "Override IREE setting" FORCE)
    set(IREE_BUILD_PYTHON_BINDINGS ON CACHE BOOL "Override IREE setting" FORCE)
    set(IREE_MLIR_DEP_MODE "DISABLED" CACHE STRING "Override IREE setting")
    npcomp_add_iree_src()
  else()
    # Otherwise depend on an installed IREE package (located via IREE_DIR).
    find_package(IREE REQUIRED CONFIG)
  endif()

  message(STATUS "IREE INCLUDE DIRS: ${IREE_PUBLIC_INCLUDE_DIRS}")
  include_directories("${IREE_PUBLIC_INCLUDE_DIRS}")
endif()
#-------------------------------------------------------------------------------
# Python Configuration
#-------------------------------------------------------------------------------

set(NPCOMP_PYTHON_BINDINGS_VERSION_LOCKED 1 CACHE BOOL
    "Links to specific python libraries, resolving all symbols.")
find_package(Python3 COMPONENTS Interpreter Development REQUIRED)
message(STATUS "Found python include dirs: ${Python3_INCLUDE_DIRS}")
message(STATUS "Found python libraries: ${Python3_LIBRARIES}")
#-------------------------------------------------------------------------------
# Pytorch Configuration
#-------------------------------------------------------------------------------

if(NPCOMP_ENABLE_PYTORCH)
  # ProbeForPyTorchInstall comes from cmake/modules/ConfigurePyTorch.cmake
  # (included in the MLIR/LLVM configuration section).
  ProbeForPyTorchInstall()
  # Fix: the original used EQUAL, which performs an integer comparison and
  # never matches the string "OPTIONAL" — making OPTIONAL mode behave as
  # REQUIRED. String comparison needs STREQUAL; quoting guards against
  # re-dereferencing.
  if("${NPCOMP_ENABLE_PYTORCH}" STREQUAL "OPTIONAL")
    find_package(Torch)
  else()
    find_package(Torch REQUIRED)
  endif()
endif()
#-------------------------------------------------------------------------------
# Pybind11 Configuration
#-------------------------------------------------------------------------------

find_package(pybind11 CONFIG REQUIRED)
# TODO: pybind11 v2.6 switched from pybind11_INCLUDE_DIRS (plural) to
# pybind11_INCLUDE_DIR (singular). A lot has changed in this area since this
# was written and overall python config and pybind11 should be modernized.
set(pybind11_INCLUDE_DIR ${pybind11_INCLUDE_DIR} ${pybind11_INCLUDE_DIRS})
message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIR}")
# Fix: the extension value's opening single quote was never closed in the
# original message, producing a malformed status line.
message(STATUS "Python prefix = '${PYTHON_MODULE_PREFIX}', "
               "suffix = '${PYTHON_MODULE_SUFFIX}', "
               "extension = '${PYTHON_MODULE_EXTENSION}'")
#-------------------------------------------------------------------------------
# Directory setup
#-------------------------------------------------------------------------------

set(MLIR_NPCOMP_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(MLIR_NPCOMP_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})

# Umbrella test targets; subdirectories hang their suites off check-npcomp.
add_custom_target(check-npcomp)
add_custom_target(check-all)
add_dependencies(check-all check-npcomp)

add_subdirectory(include/npcomp)
add_subdirectory(lib)
add_subdirectory(python)
add_subdirectory(test)
add_subdirectory(frontends)

# Tools needs to come late to ensure that NPCOMP_ALL_LIBS is populated.
# Generally things after this point may depend on NPCOMP_ALL_LIBS or libNPCOMP.so.
add_subdirectory(tools)