mirror of https://github.com/llvm/torch-mlir
Resolve remaining LTC CI failures (#1110)

* Replace CHECK_EQ with TORCH_CHECK_EQ
* Check value of TORCH_MLIR_USE_INSTALLED_PYTORCH during LTC build
* Update LTC XFAIL with NewZerosModule ops
* Explicitly blacklist _like ops
* Automatically blacklist new_/_like ops
* Prune away unused Python dependencies from LTC
* Add flag to disable LTC
* Autogen dummy _REFERENCE_LAZY_BACKEND library when LTC is disabled
* Implement compute_shape_var
* Removed Var tests from XFAIL Set
* XFAIL tests using _local_scalar_dense or index.Tensor
* Add StdDim tests to XFAIL set
* Autogen aten::cat

Branches/tags containing this commit: pull/1125/head, snapshot-20220731.550
parent 425362263b
commit 2c3b3606d0
@@ -96,6 +96,7 @@ jobs:
           -DMLIR_ENABLE_BINDINGS_PYTHON=OFF \
           -DTORCH_MLIR_ENABLE_MHLO=ON \
           -DTORCH_MLIR_USE_INSTALLED_PYTORCH=${{ matrix.torch-binary }} \
+          -DTORCH_MLIR_ENABLE_LTC=OFF \
           -DPython3_EXECUTABLE=$(which python) \
           .

@@ -49,6 +49,8 @@ if(TORCH_MLIR_ENABLE_MHLO)
 endif()
 endif()
 
+option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" ON)
+
 torch_mlir_add_llvm_external_project(
   torch-mlir-dialects
   TORCH_MLIR_DIALECTS

@@ -86,7 +86,7 @@ class GenMlirLazyIr(torchgen.dest.GenLazyIR):
         {emplace_arguments_str}
         {emplace_kwarguments}
         torch::lazy::TorchMlirOpVector {schema.aten_name}_out = torch::lazy::LowerTorchMlirBuiltin(function, op().op, shapes(), arguments, kwarguments);
-        CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});
+        TORCH_CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});
 
         return {schema.aten_name}_out;
       }}

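A note on the hunk above: the template is a Python f-string inside the code generator, so `{schema.aten_name}` and `{len(schema.returns)}` are substituted per op. As a rough, illustrative sketch (the op name "abs" and its single return value are hypothetical stand-ins, not taken from this change), the rendered body looks like this:

# Sketch: rendering the lowering template for a hypothetical one-return op.
aten_name = "abs"      # stand-in for schema.aten_name
num_returns = 1        # stand-in for len(schema.returns)

body = f"""torch::lazy::TorchMlirOpVector {aten_name}_out = torch::lazy::LowerTorchMlirBuiltin(function, op().op, shapes(), arguments, kwarguments);
TORCH_CHECK_EQ({aten_name}_out.size(), {num_returns});
return {aten_name}_out;"""
print(body)  # roughly the C++ emitted for each generated lowering
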
@@ -236,6 +236,9 @@ class GenTorchMlirLTC:
                     continue
                 if base in supported or op in supported:
                     continue
+                # Blacklist new_/_like ops since they are non-differentiable.
+                if any(o.startswith("new_") or o.endswith("_like") for o in (base, op)):
+                    continue
 
                 if func.has_composite_implicit_autograd_kernel:
                     composite_implicit.add(op)

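The predicate added above can be sanity-checked in isolation; the op names below are illustrative examples, not drawn from the generator's actual input:

# Sketch: the new_/_like blacklist predicate applied to sample op names.
def blacklisted(base, op):
    return any(o.startswith("new_") or o.endswith("_like") for o in (base, op))

print(blacklisted("new_zeros", "new_zeros.out"))  # True  (new_ prefix)
print(blacklisted("ones_like", "ones_like"))      # True  (_like suffix)
print(blacklisted("add", "add.Tensor"))           # False (stays eligible)
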
@@ -43,7 +43,6 @@ supported:
 # - bernoulli
 # - bernoulli_
 - _to_copy
-- cat
 - clone
 - empty.memory_format
 - empty_strided

@@ -227,6 +227,7 @@ LTC_XFAIL_SET = {
     "FullLikeModuleInt3D_basic",
     "GeFloatIntModule_basic",
     "GeFloatModule_basic",
+    "GeIntModule_basic",
     "GtFloatIntModule_basic",
     "GtIntModule_basic",
     "HBC_basic",

@@ -266,6 +267,11 @@ LTC_XFAIL_SET = {
     "IndexPutImpl3DFloatNonAccumulateModule_basic",
     "IndexTensorModule3dInput_basic",
     "IndexTensorModule_basic",
+    "IndexTensorMultiInputContiguousCenter_basic",
+    "IndexTensorMultiInputNonContiguous_basic",
+    "IndexTensorMultiInputOneDim_basic",
+    "IndexTensorMultiInputThreeIndexers_basic",
+    "IndexTensorMultiInput_basic",
     "IndexTensorSelectDimModule_basic",
     "Matmul_dot",
     "Matmul_matvec",

@@ -288,6 +294,12 @@ LTC_XFAIL_SET = {
     "NewOnesModuleFloat3D_basic",
     "NewOnesModuleInt2D_basic",
     "NewOnesModuleInt3D_basic",
+    "NewZerosModuleDefaultDtype_basic",
+    "NewZerosModuleFalsePinMemory_basic",
+    "NewZerosModuleFloat2D_basic",
+    "NewZerosModuleFloat3D_basic",
+    "NewZerosModuleInt2D_basic",
+    "NewZerosModuleInt3D_basic",
     "OnesLikeModule_defaultDtype",
     "OnesLikeModule_falsePinMemory",
     "OnesLikeModule_float",

@@ -302,6 +314,9 @@ LTC_XFAIL_SET = {
     "SliceStartEqEndModule_basic",
     "SqrtIntModule_basic",
     "StdBiasedModule_basic",
+    "StdDimBiasedModule_basic",
+    "StdDimKeepDimFalseModule_basic",
+    "StdDimKeepDimTrueModule_basic",
     "StdUnbiasedModule_basic",
     "SubFloatModule_basic",
     "SubIntModule_basic",

@@ -317,15 +332,5 @@ LTC_XFAIL_SET = {
     "UniformModule_basic",
     "UniformStaticModule_basic",
     "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic",
-    "VarBiasedModule_basic",
-    "VarDimAllDimReduceModule_basic",
-    "VarDimBiasedModule_basic",
-    "VarDimKeepDimFalseModule_basic",
-    "VarDimModule_basic",
-    "VarDimMultiDimModule_basic",
-    "VarDimNegativeModule_basic",
-    "VarDimSingleDimModule_basic",
-    "VarDimUnbiasedModule_basic",
-    "VarUnbiasedModule_basic",
     "ViewCollapseDynamicWithAtenSizeIntModule_basic",
 }

@@ -13,6 +13,30 @@ set(TORCH_MLIR_PYTHON_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/torch_mlir")
 # We vendor our own MLIR instance in the `torch_mlir` namespace.
 add_compile_definitions("MLIR_PYTHON_PACKAGE_PREFIX=torch_mlir.")
 
+################################################################################
+# PyTorch
+################################################################################
+
+option(TORCH_MLIR_USE_INSTALLED_PYTORCH "Build from local PyTorch in environment" ON)
+
+if (NOT TORCH_MLIR_USE_INSTALLED_PYTORCH)
+  # Source builds
+  set(ENV{PYTORCH_REPO} ${PYTORCH_REPO})
+  set(ENV{PYTORCH_BRANCH} ${PYTORCH_BRANCH})
+  set(ENV{MACOSX_DEPLOYMENT_TARGET} ${MACOSX_DEPLOYMENT_TARGET})
+  set(ENV{CMAKE_OSX_ARCHITECTURES} ${CMAKE_OSX_ARCHITECTURES})
+  set(ENV{CMAKE_C_COMPILER_LAUNCHER} ${CMAKE_C_COMPILER_LAUNCHER})
+  set(ENV{CMAKE_CXX_COMPILER_LAUNCHER} ${CMAKE_CXX_COMPILER_LAUNCHER})
+  execute_process(
+    COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../build_tools/build_libtorch.sh
+    RESULT_VARIABLE _result
+  )
+  if(_result)
+    message(FATAL_ERROR "Failed to run `build_libtorch.sh`")
+  endif()
+  set(TORCH_INSTALL_PREFIX "libtorch")
+endif()
+
 ################################################################################
 # Sources
 ################################################################################

@@ -60,33 +84,17 @@ declare_mlir_python_extension(TorchMLIRPythonExtensions.Main
 # Lazy Tensor Core
 ################################################################################
 
-add_subdirectory(torch_mlir/csrc/base_lazy_backend)
+if(TORCH_MLIR_ENABLE_LTC)
+  add_subdirectory(torch_mlir/csrc/base_lazy_backend)
+endif()
+# Reference backend has a separate check for TORCH_MLIR_ENABLE_LTC.
 add_subdirectory(torch_mlir/csrc/reference_lazy_backend)
 
 ################################################################################
 # Optionally handle JIT IR importer.
 ################################################################################
 
-option(TORCH_MLIR_USE_INSTALLED_PYTORCH "Build from local PyTorch in environment" ON)
-
 if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER)
-  if (NOT TORCH_MLIR_USE_INSTALLED_PYTORCH)
-    # Source builds
-    set(ENV{PYTORCH_REPO} ${PYTORCH_REPO})
-    set(ENV{PYTORCH_BRANCH} ${PYTORCH_BRANCH})
-    set(ENV{MACOSX_DEPLOYMENT_TARGET} ${MACOSX_DEPLOYMENT_TARGET})
-    set(ENV{CMAKE_OSX_ARCHITECTURES} ${CMAKE_OSX_ARCHITECTURES})
-    set(ENV{CMAKE_C_COMPILER_LAUNCHER} ${CMAKE_C_COMPILER_LAUNCHER})
-    set(ENV{CMAKE_CXX_COMPILER_LAUNCHER} ${CMAKE_CXX_COMPILER_LAUNCHER})
-    execute_process(
-      COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../build_tools/build_libtorch.sh
-      RESULT_VARIABLE _result
-    )
-    if(_result)
-      message(FATAL_ERROR "Failed to run `build_libtorch.sh`")
-    endif()
-    set(TORCH_INSTALL_PREFIX "libtorch")
-  endif()
   add_subdirectory(torch_mlir/dialects/torch/importer/jit_ir)
   add_subdirectory(torch_mlir_e2e_test)
 endif()

@@ -154,8 +162,10 @@ endif()
 # TODO: Add after macOS builds are fixed
 #add_dependencies(TorchMLIRPythonModules torch_mlir_custom_op_example)
 
-# Add Torch-MLIR LTC backend as dependency
-add_dependencies(TorchMLIRPythonModules torch_mlir_ltc_backend)
-add_dependencies(TorchMLIRPythonModules reference_lazy_backend)
+if(TORCH_MLIR_ENABLE_LTC)
+  # Add Torch-MLIR LTC backend as dependency
+  add_dependencies(TorchMLIRPythonModules torch_mlir_ltc_backend)
+  add_dependencies(TorchMLIRPythonModules reference_lazy_backend)
+endif()
 
 add_subdirectory(test)

@@ -5,10 +5,16 @@
 list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/python/torch_mlir/cmake/modules")
 include(TorchMLIRPyTorch)
 
 TorchMLIRProbeForPyTorchInstall()
+if(TORCH_MLIR_USE_INSTALLED_PYTORCH)
+  TorchMLIRConfigurePyTorch()
+else()
+  set(Torch_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../../libtorch/share/cmake/Torch")
+endif()
+
 find_package(Torch 1.11 REQUIRED)
 
-TorchMLIRConfigurePyTorch()
 set(TORCHGEN_DIR ${Torch_ROOT}/../../../torchgen)
 
 include_directories(BEFORE

@@ -76,8 +82,6 @@ target_link_libraries(torch_mlir_ltc_backend
   TorchMLIRAggregateCAPI
   TorchMLIRJITIRImporter
   ${TORCH_LIBRARIES}
-  ${Python3_LIBRARIES}
-  torch_python
 )
 
 message(STATUS "TORCH_CXXFLAGS=${TORCH_CXXFLAGS} -Wno-pedantic")

@@ -64,7 +64,7 @@ void TorchMlirLoweringContext::Lower(const Node* node) {
           dynamic_cast<const torch::lazy::TorchMlirNode*>(node)) {
     TorchMlirOpVector ops = torch_mlir_node->Lower(function_, this);
     CHECK(!ops.empty()) << "Failed to lower: " << *node;
-    CHECK_EQ(node->num_outputs(), ops.size());
+    TORCH_CHECK_EQ(node->num_outputs(), ops.size());
     for (size_t i = 0; i < ops.size(); ++i) {
       AssignOutputOp(torch::lazy::Output(node, i), ops[i]);
     }

@@ -154,25 +154,6 @@ void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src) {
 // // return self;
 // }
 
-at::Tensor LazyNativeFunctions::cat(at::TensorList tensors, int64_t dim) {
-  TORCH_LAZY_FN_COUNTER("lazy::");
-  auto lazy_tensors = torch::lazy::GetLtcTensors(tensors);
-  std::vector<torch::lazy::Value> values;
-  values.reserve(lazy_tensors.size());
-  for (auto& tensor : lazy_tensors) {
-    values.emplace_back(tensor->GetIrValue());
-  }
-
-  auto shapes = torch::lazy::compute_shape_cat(tensors, dim);
-  UNIMPLEMENTED_FUNCTION_ERROR();
-  // auto node =
-  //     torch::lazy::MakeNode<ir::ops::Cat>(values, dim, std::move(shapes));
-  // auto result = torch::lazy::CreateAtenFromLtcTensor(
-  //     torch::lazy::LazyTensor::Create(torch::lazy::Value(node, 0),
-  //     lazy_tensors[0]->GetDevice()));
-  // return result;
-}
-
 // clone is special in LT because we make it a no-op.
 // This should be safe to do, because every operator in the LT is functional.
 at::Tensor LazyNativeFunctions::clone(

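With the hand-written stub removed, aten::cat is handled by the autogenerated lowering (see the `- cat` removal from the supported list earlier in this commit). For reference, a small sketch of the eager shape semantics any cat lowering must reproduce, using assumed example sizes:

# Sketch: eager torch.cat concatenates along `dim`; the other dims must match.
import torch

a = torch.randn(2, 3)
b = torch.randn(4, 3)
print(torch.cat([a, b], dim=0).shape)  # torch.Size([6, 3])
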
@@ -205,7 +205,7 @@ GenerateClone(torch::jit::Value* val, TorchMlirFunction function) {
   // Type of cloned value should be identical to the original one.
   TorchMlirOpVector cloned =
       LowerBuiltin(at::aten::clone, {val->type()}, function, clone_arguments);
-  CHECK_EQ(cloned.size(), 1);
+  TORCH_CHECK_EQ(cloned.size(), 1);
   return cloned.front();
 }

|
@ -235,7 +235,7 @@ torch::jit::Value* GenerateSlice(
|
||||||
c10::ArrayRef<Shape>(
|
c10::ArrayRef<Shape>(
|
||||||
compute_shape_slice(base->type(), dim, start, end, step)),
|
compute_shape_slice(base->type(), dim, start, end, step)),
|
||||||
function, arguments);
|
function, arguments);
|
||||||
CHECK_EQ(selected.size(), 1);
|
TORCH_CHECK_EQ(selected.size(), 1);
|
||||||
return selected.front();
|
return selected.front();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -84,7 +84,7 @@ class ToCopy : public torch::lazy::TorchMlirNode {
     kwarguments.emplace_back("non_blocking", non_blocking);
     kwarguments.emplace_back("memory_format", memory_format);
     torch::lazy::TorchMlirOpVector _to_copy_out = torch::lazy::LowerTorchMlirBuiltin(function, op().op, shapes(), arguments, kwarguments);
-    CHECK_EQ(_to_copy_out.size(), 1);
+    TORCH_CHECK_EQ(_to_copy_out.size(), 1);
 
     return _to_copy_out;

@@ -29,5 +29,12 @@ compute_shape_mul(const at::Tensor& self, const at::Scalar& other) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
+std::vector<torch::lazy::Shape> compute_shape_var(
+    const at::Tensor& self, at::OptionalIntArrayRef dim,
+    c10::optional<int64_t> correction, bool keepdim) {
+  // Result of variance is scalar tensor.
+  return {Shape(self.scalar_type(), {})};
+}
+
 } // namespace lazy
 } // namespace torch

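The scalar shape returned by compute_shape_var above matches what eager PyTorch produces for a full reduction; a quick illustration with an assumed input size:

# Sketch: torch.var with no dim reduces everything to a 0-dim (scalar) tensor,
# matching the Shape(self.scalar_type(), {}) returned by the new hunk.
import torch

x = torch.randn(3, 4)
v = torch.var(x)
print(v.shape)  # torch.Size([])
print(v.dim())  # 0
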
@@ -4,10 +4,15 @@
 list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/python/torch_mlir/cmake/modules")
 include(TorchMLIRPyTorch)
-TorchMLIRProbeForPyTorchInstall()
-find_package(Torch 1.11 REQUIRED)
 
-TorchMLIRConfigurePyTorch()
+TorchMLIRProbeForPyTorchInstall()
+if(TORCH_MLIR_USE_INSTALLED_PYTORCH)
+  TorchMLIRConfigurePyTorch()
+else()
+  set(Torch_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../../libtorch/share/cmake/Torch")
+endif()
+
+find_package(Torch 1.11 REQUIRED)
 
 ###########################################################################
 # Setup Python development

@@ -21,39 +26,47 @@ mlir_configure_python_dev_packages()
 # Library definition
 ###########################################################################
 
-include_directories(BEFORE
-  ${TORCH_INCLUDE_DIRS}
-  ${CMAKE_CURRENT_SOURCE_DIR}
-  ${CMAKE_CURRENT_BINARY_DIR}
-  ${Python3_INCLUDE_DIRS}
-  ${PYTHON_H_DIR}
-  ${PROJECT_SOURCE_DIR}/python
-)
-link_directories("${TORCH_INSTALL_PREFIX}/lib")
-link_directories(${CMAKE_CURRENT_SOURCE_DIR}/lib)
-add_link_options(-Wl,-rpath,$ORIGIN/lib)
-
-set(REFERENCE_LAZY_BACKEND_CSRC
-  backend_impl.cpp
-  reference_lazy_backend_pybind.cpp
-)
-add_library(reference_lazy_backend SHARED ${REFERENCE_LAZY_BACKEND_CSRC})
-add_dependencies(reference_lazy_backend
-  torch_mlir_ltc_backend
-)
-target_link_libraries(reference_lazy_backend
-  ${TORCH_LIBRARIES}
-  ${Python3_LIBRARIES}
-  torch_python
-  torch_mlir_ltc_backend
-)
-
-message(STATUS "TORCH_CXXFLAGS=${TORCH_CXXFLAGS} -Wno-pedantic")
-set_target_properties(reference_lazy_backend PROPERTIES
-  LIBRARY_OUTPUT_DIRECTORY "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir/torch_mlir/reference_lazy_backend"
-  OUTPUT_NAME _REFERENCE_LAZY_BACKEND
-  PREFIX "${PYTHON_MODULE_PREFIX}"
-  SUFFIX "${PYTHON_MODULE_EXTENSION}"
-  CXX_VISIBILITY_PRESET "hidden"
-  COMPILE_FLAGS "${TORCH_CXXFLAGS} -Wno-pedantic"
-)
+set(LIBRARY_OUTPUT_PATH "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir/torch_mlir/reference_lazy_backend")
+set(OUTPUT_NAME "_REFERENCE_LAZY_BACKEND")
+
+if(TORCH_MLIR_ENABLE_LTC)
+  include_directories(BEFORE
+    ${TORCH_INCLUDE_DIRS}
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_CURRENT_BINARY_DIR}
+    ${Python3_INCLUDE_DIRS}
+    ${PYTHON_H_DIR}
+    ${PROJECT_SOURCE_DIR}/python
+  )
+  link_directories("${TORCH_INSTALL_PREFIX}/lib")
+  link_directories(${CMAKE_CURRENT_SOURCE_DIR}/lib)
+  add_link_options(-Wl,-rpath,$ORIGIN/lib)
+
+  add_library(reference_lazy_backend SHARED
+    backend_impl.cpp
+    reference_lazy_backend_pybind.cpp
+  )
+  add_dependencies(reference_lazy_backend
+    torch_mlir_ltc_backend
+  )
+  target_link_libraries(reference_lazy_backend
+    ${TORCH_LIBRARIES}
+    torch_mlir_ltc_backend
+  )
+
+  message(STATUS "TORCH_CXXFLAGS=${TORCH_CXXFLAGS} -Wno-pedantic")
+  set_target_properties(reference_lazy_backend PROPERTIES
+    LIBRARY_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
+    OUTPUT_NAME ${OUTPUT_NAME}
+    PREFIX "${PYTHON_MODULE_PREFIX}"
+    SUFFIX "${PYTHON_MODULE_EXTENSION}"
+    CXX_VISIBILITY_PRESET "hidden"
+    COMPILE_FLAGS "${TORCH_CXXFLAGS} -Wno-pedantic"
+  )
+else()
+  # To avoid import errors when LTC is disabled (and a bunch of checks
+  # associated with that), we will generate a dummy placeholder library.
+  execute_process(
+    COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/gen_dummy_lib.py ${LIBRARY_OUTPUT_PATH} ${OUTPUT_NAME}
+  )
+endif()

@@ -0,0 +1,23 @@
+# When LTC is disabled in Torch-MLIR build, we will generate a dummy module to
+# ensure that no import errors occur.
+
+import sys
+import os
+
+if __name__ == '__main__':
+    path = sys.argv[1] # dummy script path
+    file_name = sys.argv[2] # dummy script
+
+    contents = '''
+# This file was automatically generated due to LTC being disabled in build.
+
+class LazyTensorCoreTestConfig:
+    def __init__(self):
+        assert False, "LTC is not enabled. Check the value of `TORCH_MLIR_ENABLE_LTC`"
+'''
+
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+    with open(os.path.join(path, file_name + '.py'), 'w') as file:
+        file.write(contents)

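When the build runs this generator, importing the placeholder later fails loudly instead of with a confusing ModuleNotFoundError. A minimal sketch of that behavior, assuming the generated file's directory ends up on sys.path (the exact package location depends on the build layout):

# Sketch: the generated _REFERENCE_LAZY_BACKEND.py placeholder in use.
from _REFERENCE_LAZY_BACKEND import LazyTensorCoreTestConfig  # hypothetical import path

LazyTensorCoreTestConfig()  # AssertionError: LTC is not enabled. Check the value of `TORCH_MLIR_ENABLE_LTC`
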
@@ -10,7 +10,14 @@ include_directories(BEFORE
 )
 link_directories("${TORCH_INSTALL_PREFIX}/lib")
 
-add_library(TorchMLIRJITIRImporter SHARED
+# Hack! Currently out-of-tree build fails when this is set to SHARED, so we have this toggle
+if(TORCH_MLIR_ENABLE_LTC)
+  set(LIBRARY_TYPE "SHARED")
+else()
+  set(LIBRARY_TYPE "MODULE")
+endif()
+
+add_library(TorchMLIRJITIRImporter ${LIBRARY_TYPE}
   class_annotator.cpp
   class_annotator_pybind.cpp
   get_registered_ops.cpp