#-------------------------------------------------------------------------------
# Project setup and globals
#-------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.12)

if(POLICY CMP0068)
  cmake_policy(SET CMP0068 NEW)
  set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
endif()

if(POLICY CMP0075)
  cmake_policy(SET CMP0075 NEW)
endif()

if(POLICY CMP0077)
  cmake_policy(SET CMP0077 NEW)
endif()

if(POLICY CMP0116)
  cmake_policy(SET CMP0116 OLD)
endif()

project(torch-mlir LANGUAGES CXX C)

set(CMAKE_C_STANDARD 11)
set(CMAKE_CXX_STANDARD 17)

include(CMakeDependentOption)

#-------------------------------------------------------------------------------
# Project options
#-------------------------------------------------------------------------------

option(TORCH_MLIR_ENABLE_WERROR_FLAG "Enable the `-Werror` flag on supported directories, treating warnings as errors" OFF)
option(TORCH_MLIR_USE_INSTALLED_PYTORCH "If depending on PyTorch, use the version installed in the current Python environment" ON)

option(TORCH_MLIR_ENABLE_REFBACKEND "Enable reference backend" ON)
if(TORCH_MLIR_ENABLE_REFBACKEND)
  add_definitions(-DTORCH_MLIR_ENABLE_REFBACKEND)
endif()

option(TORCH_MLIR_ENABLE_STABLEHLO "Add stablehlo dialect" ON)
if(TORCH_MLIR_ENABLE_STABLEHLO)
  add_definitions(-DTORCH_MLIR_ENABLE_STABLEHLO)
endif()

option(TORCH_MLIR_OUT_OF_TREE_BUILD "Specifies an out of tree build" OFF)

# PyTorch native extension gate. If OFF, then no features which depend on
# native extensions will be built.
option(TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS "Enables PyTorch native extension features" ON)
cmake_dependent_option(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER "Enables JIT IR Importer" ON TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
cmake_dependent_option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" OFF TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
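# A note on the two dependent options above (a sketch of the intended behavior,
# not additional configuration): cmake_dependent_option(<opt> <doc> <default>
# <condition> <force>) only exposes <opt> with <default> while <condition> is
# true; otherwise <opt> is hidden and forced to <force>. So configuring with
#   -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=OFF
# forces both TORCH_MLIR_ENABLE_JIT_IR_IMPORTER and TORCH_MLIR_ENABLE_LTC to OFF,
# regardless of any values passed for them.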

option(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER "Enables the ONNX C importer" OFF)

macro(torch_mlir_enable_werror)
  if(TORCH_MLIR_ENABLE_WERROR_FLAG)
    if(NOT MSVC)
      add_compile_options(-Werror)
    endif()
  endif()
endmacro()
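# Illustrative usage (not wired up here): a directory that wants -Werror would
# call the macro near the top of its own CMakeLists.txt, e.g.
#   torch_mlir_enable_werror()
# after which targets defined in that directory (and below) are compiled with
# -Werror whenever TORCH_MLIR_ENABLE_WERROR_FLAG is ON and the compiler is not MSVC.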

#-------------------------------------------------------------------------------
# Configure out-of-tree vs in-tree build
#-------------------------------------------------------------------------------
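# As a rough sketch (paths are illustrative), a standalone/out-of-tree configure
# points this project at an existing MLIR build via MLIR_DIR, e.g.:
#   cmake -GNinja -S . -B build \
#     -DMLIR_DIR=/path/to/llvm-build/lib/cmake/mlir \
#     -DCMAKE_BUILD_TYPE=Release
# whereas an in-tree build is driven from the LLVM build with
# -DLLVM_EXTERNAL_PROJECTS=torch-mlir (see the in-tree branch below).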
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR TORCH_MLIR_OUT_OF_TREE_BUILD)
  message(STATUS "Torch-MLIR out-of-tree build.")
  # Out-of-tree build

  #-------------------------------------------------------------------------------
  # MLIR/LLVM Configuration
  #-------------------------------------------------------------------------------

  find_package(MLIR REQUIRED CONFIG)
  message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}")
  message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")

  set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
  set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)

  set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
  set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

  # Define the default arguments to use with 'lit', and an option for the user to
  # override.
  set(LIT_ARGS_DEFAULT "-sv")
  if (MSVC_IDE OR XCODE)
    set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
  endif()
  set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
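  # For example (illustrative values), a user can narrow and quiet test runs by
  # overriding the cached default set above at configure time:
  #   cmake ... -DLLVM_LIT_ARGS="-sv --filter=Conversion"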

  list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
  list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
  include(TableGen)
  include(AddLLVM)
  include(AddMLIR)
  include(HandleLLVMOptions)

  # Enable the Python bindings here, but don't try to compile the Python
  # extensions at this point; that requires importing a number of dependencies
  # from AddMLIRPython, which happens further below.
  set(MLIR_ENABLE_BINDINGS_PYTHON ON)

  set(TORCH-MLIR_BUILT_STANDALONE ON)
  set(BACKEND_PACKAGE_STRING "LLVM ${LLVM_PACKAGE_VERSION}")
else()
  message(STATUS "Torch-MLIR in-tree build.")
  # In-tree build with LLVM_EXTERNAL_PROJECTS=torch-mlir

  option(MLIR_ENABLE_BINDINGS_PYTHON "Enables MLIR Python Bindings" OFF)

  # TODO: Fix this upstream so that global include directories are not needed.
  set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir)
  set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include)
  set(MLIR_GENERATED_INCLUDE_DIR ${LLVM_BINARY_DIR}/tools/mlir/include)
  set(MLIR_INCLUDE_DIRS "${MLIR_INCLUDE_DIR};${MLIR_GENERATED_INCLUDE_DIR}")
endif()

set(TORCH_MLIR_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
set(TORCH_MLIR_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}")
message(STATUS "Building torch-mlir project at ${TORCH_MLIR_SOURCE_DIR} (into ${TORCH_MLIR_BINARY_DIR})")

include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${MLIR_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/include)

function(torch_mlir_target_includes target)
  set(_dirs
    $<BUILD_INTERFACE:${MLIR_INCLUDE_DIRS}>
    $<BUILD_INTERFACE:${TORCH_MLIR_SOURCE_DIR}/include>
    $<BUILD_INTERFACE:${TORCH_MLIR_BINARY_DIR}/include>
  )
  # In LLVM parlance, the actual target may just be an interface and may not
  # be responsible for actually compiling anything. The corresponding obj.
  # target, when present, is just used for compilation and does not
  # contribute to the interface properties.
  # TODO: Normalize this upstream.
  target_include_directories(${target} PUBLIC ${_dirs})
  if(TARGET obj.${target})
    target_include_directories(obj.${target} PRIVATE ${_dirs})
  endif()
endfunction()
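# Illustrative usage (the target name here is hypothetical): a library defined
# elsewhere in this tree would register the project include paths with
#   torch_mlir_target_includes(TorchMLIRSomeLibrary)
# so that both the target's public interface and, when present, its obj.*
# companion see the source and generated include directories.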

# Configure CMake.
list(APPEND CMAKE_MODULE_PATH ${MLIR_MAIN_SRC_DIR}/cmake/modules)
list(APPEND CMAKE_MODULE_PATH ${LLVM_MAIN_SRC_DIR}/cmake)
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/build_tools/cmake)

include(TableGen)
include(AddLLVM)
include(AddMLIR)
include(AddMLIRPython)

################################################################################
# Setup python.
################################################################################

if(MLIR_ENABLE_BINDINGS_PYTHON)
  include(MLIRDetectPythonEnv)
  mlir_configure_python_dev_packages()
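  # Note (illustrative): mlir_configure_python_dev_packages() locates the Python
  # interpreter and development packages via CMake's FindPython3 machinery, so a
  # specific environment is typically selected at configure time with something like
  #   -DPython3_EXECUTABLE=/path/to/venv/bin/python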
endif()

add_subdirectory(include)
add_subdirectory(lib)
add_subdirectory(tools)

add_custom_target(check-torch-mlir-all)
add_dependencies(check-torch-mlir-all check-torch-mlir)

if(MLIR_ENABLE_BINDINGS_PYTHON)
  # If parent projects want to configure where to place the python packages,
  # respect that.
  if(NOT TORCH_MLIR_PYTHON_PACKAGES_DIR)
    set(TORCH_MLIR_PYTHON_PACKAGES_DIR "${CMAKE_CURRENT_BINARY_DIR}/python_packages")
  endif()
endif()

add_subdirectory(test)

if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
  install(DIRECTORY include/torch-mlir include/torch-mlir-c
    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
    COMPONENT torch-mlir-headers
    FILES_MATCHING
    PATTERN "*.def"
    PATTERN "*.h"
    PATTERN "*.inc"
    PATTERN "*.td"
    PATTERN "LICENSE.TXT"
  )

  install(DIRECTORY ${TORCH_MLIR_BINARY_DIR}/include/torch-mlir
    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
    COMPONENT torch-mlir-headers
    FILES_MATCHING
    PATTERN "*.def"
    PATTERN "*.h"
    PATTERN "*.gen"
    PATTERN "*.inc"
    PATTERN "*.td"
    PATTERN "CMakeFiles" EXCLUDE
    PATTERN "config.h" EXCLUDE
  )

  if (NOT LLVM_ENABLE_IDE)
    add_llvm_install_targets(install-torch-mlir-headers
      DEPENDS torch-mlir-headers
      COMPONENT torch-mlir-headers)
  endif()
endif()
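# Illustrative usage: with the component install rules above, just the headers
# can be installed from a build tree with something like
#   cmake --build build --target install-torch-mlir-headers
# or, equivalently, cmake --install build --component torch-mlir-headers.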

# Important: If loading StableHLO in this fashion, it must come last,
# after all of our libraries and test targets have been defined.
# It seems that both this project and StableHLO abuse upstream CMake macros
# that accumulate global properties.
# Getting this wrong results in building large parts of the stablehlo
# project that we don't actually depend on. Furthermore, some of those parts
# do not even compile on all platforms.
if (TORCH_MLIR_ENABLE_STABLEHLO)
  set(STABLEHLO_BUILD_EMBEDDED ON)
  set(STABLEHLO_ENABLE_BINDINGS_PYTHON ON)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/externals/stablehlo
    ${CMAKE_CURRENT_BINARY_DIR}/stablehlo
    EXCLUDE_FROM_ALL)
  include_directories(${CMAKE_CURRENT_SOURCE_DIR}/externals/stablehlo)
endif()

#-------------------------------------------------------------------------------
# Sub-projects
#-------------------------------------------------------------------------------

# Sub-projects can bundle additional PyTorch extensions by adding them to this
# source target. It is typically empty unless features are enabled.
if(MLIR_ENABLE_BINDINGS_PYTHON)
  declare_mlir_python_sources(TorchMLIRPythonTorchExtensionsSources)
endif()

# Build projects first, as they may populate additional Python deps.
add_subdirectory(projects)

# Finish with top-level Python bindings so it can handle additional deps.
if(MLIR_ENABLE_BINDINGS_PYTHON)
  add_subdirectory(python)
endif()