Re-enable custom op support

pull/1236/head
nithinsubbiah 2022-08-02 20:35:56 +00:00 committed by Vivek Khandelwal
parent 0af55781ae
commit fde390c766
10 changed files with 21 additions and 51 deletions

@@ -16,19 +16,17 @@ build_dir="$(realpath "${TORCH_MLIR_BUILD_DIR:-$src_dir/build}")"
 torch_transforms_cpp_dir="${src_dir}/lib/Dialect/Torch/Transforms"
 python_packages_dir="${build_dir}/tools/torch-mlir/python_packages"

+TORCH_MLIR_EXT_PYTHONPATH="${TORCH_MLIR_EXT_PYTHONPATH:-""}"
 pypath="${python_packages_dir}/torch_mlir"
-# TODO: Re-enable once custom op support is back.
-#if [ ! -z ${TORCH_MLIR_EXT_PYTHONPATH} ]; then
-#  pypath="${pypath}:${TORCH_MLIR_EXT_PYTHONPATH}"
-#fi
-#ext_module="torch_mlir._torch_mlir_custom_op_example"
-#if [ ! -z ${TORCH_MLIR_EXT_MODULES} ]; then
-#  ext_module="${ext_module},${TORCH_MLIR_EXT_MODULES} "
-#fi
+if [ ! -z ${TORCH_MLIR_EXT_PYTHONPATH} ]; then
+  pypath="${pypath}:${TORCH_MLIR_EXT_PYTHONPATH}"
+fi
+TORCH_MLIR_EXT_MODULES="${TORCH_MLIR_EXT_MODULES:-""}"
+if [ ! -z ${TORCH_MLIR_EXT_MODULES} ]; then
+  ext_module="${TORCH_MLIR_EXT_MODULES} "
+fi

 PYTHONPATH="${pypath}" python \
   -m torch_mlir.dialects.torch.importer.jit_ir.build_tools.shape_lib_gen \
+  --pytorch_op_extensions=${ext_module} \
   --torch_transforms_cpp_dir="${torch_transforms_cpp_dir}"
-# TODO: Add back to shape_lib_gen invocation once custom op support is back.
-# --pytorch_op_extensions=${ext_module} \
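
With this change both generator scripts read extensions from the environment rather than from the previously hard-coded torch_mlir._torch_mlir_custom_op_example module: TORCH_MLIR_EXT_PYTHONPATH is appended to PYTHONPATH, and the module names in TORCH_MLIR_EXT_MODULES are forwarded to the generator via --pytorch_op_extensions, which imports them so that any ops they register become visible in the PyTorch JIT operator registry. A minimal sketch of such an extension module follows; the package name, op namespace, and shared-library path are hypothetical, and it assumes the ops are registered on the C++ side via TORCH_LIBRARY in a library loaded at import time.

# my_project/torch_mlir_ext.py -- hypothetical extension module that
# TORCH_MLIR_EXT_MODULES could name, with TORCH_MLIR_EXT_PYTHONPATH pointing
# at the directory that contains my_project/.
import torch

# Importing this module must register the custom ops as a side effect.
# Here that happens by loading a compiled library whose C++ code declares
# the ops, e.g. TORCH_LIBRARY(my_namespace, m) { m.def("identity(Tensor t) -> Tensor"); }.
torch.ops.load_library("build/lib/libmy_custom_ops.so")

The ODS and shape library would then be regenerated with TORCH_MLIR_EXT_PYTHONPATH pointing at the package root and TORCH_MLIR_EXT_MODULES=my_project.torch_mlir_ext exported before running the update_torch_ods.sh and update_shape_lib.sh scripts.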

@@ -16,20 +16,19 @@ build_dir="$(realpath "${TORCH_MLIR_BUILD_DIR:-$src_dir/build}")"
 torch_ir_include_dir="${src_dir}/include/torch-mlir/Dialect/Torch/IR"
 python_packages_dir="${build_dir}/tools/torch-mlir/python_packages"

+TORCH_MLIR_EXT_PYTHONPATH="${TORCH_MLIR_EXT_PYTHONPATH:-""}"
 pypath="${python_packages_dir}/torch_mlir"
-# TODO: Re-enable once custom op support is back.
-#if [ ! -z ${TORCH_MLIR_EXT_PYTHONPATH} ]; then
-#  pypath="${pypath}:${TORCH_MLIR_EXT_PYTHONPATH}"
-#fi
-#ext_module="torch_mlir._torch_mlir_custom_op_example"
-#if [ ! -z ${TORCH_MLIR_EXT_MODULES} ]; then
-#  ext_module="${ext_module},${TORCH_MLIR_EXT_MODULES}"
-#fi
+if [ ! -z ${TORCH_MLIR_EXT_PYTHONPATH} ]; then
+  pypath="${pypath}:${TORCH_MLIR_EXT_PYTHONPATH}"
+fi
+TORCH_MLIR_EXT_MODULES="${TORCH_MLIR_EXT_MODULES:-""}"
+ext_module="${ext_module:-""}"
+if [ ! -z ${TORCH_MLIR_EXT_MODULES} ]; then
+  ext_module="${TORCH_MLIR_EXT_MODULES}"
+fi

 PYTHONPATH="${pypath}" python \
   -m torch_mlir.dialects.torch.importer.jit_ir.build_tools.torch_ods_gen \
   --torch_ir_include_dir="${torch_ir_include_dir}" \
+  --pytorch_op_extensions="${ext_module}" \
   --debug_registry_dump="${torch_ir_include_dir}/JITOperatorRegistryDump.txt"
-# TODO: Add back to torch_ods_gen invocation once custom op support is back.
-# --pytorch_op_extensions="${ext_module}" \

@@ -1,6 +1,4 @@
 add_mlir_conversion_library(TorchMLIRTorchToLinalg
-  # TODO: Re-enable after MacOS support is fixed for the custom op extension.
-  # CustomOpExample.cpp
   DataMovement.cpp
   IndirectDataMovement.cpp
   Linear.cpp

@@ -63,9 +63,6 @@ void populateIndirectDataMovementPatternsAndLegality(
 void populateTensorConstructorsPatternsAndLegality(TypeConverter &typeConverter,
                                                    RewritePatternSet &patterns,
                                                    ConversionTarget &target);
-//void populateCustomOpExamplePatternsAndLegality(TypeConverter &typeConverter,
-//                                                RewritePatternSet &patterns,
-//                                                ConversionTarget &target);

 } // namespace torch_to_linalg
 } // namespace torch

@@ -62,8 +62,6 @@ public:
     RewritePatternSet patterns(context);

-    //torch_to_linalg::populateCustomOpExamplePatternsAndLegality(
-    //    typeConverter, patterns, target);
     torch_to_linalg::populateTensorScalarInteropPatternsAndLegality(
         typeConverter, patterns, target);
     torch_to_linalg::populateLinearPatternsAndLegality(typeConverter, patterns,

@@ -111,8 +111,7 @@ add_subdirectory(torch_mlir/eager_mode)
 # Required for running the update_torch_ods.sh and update_shape_lib.sh scripts.
 ################################################################################
-# TODO: renable once it build on macOS Intel / M1
-#add_subdirectory(torch_mlir/_torch_mlir_custom_op_example)
+# add_subdirectory(torch_mlir/_torch_mlir_custom_op_example)

 ################################################################################
 # Generate packages and shared library
@@ -159,9 +158,6 @@ if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER)
   add_dependencies(TorchMLIRPythonModules TorchMLIRE2ETestPythonModules)
 endif()

-# TODO: Add after macOS builds are fixed
-#add_dependencies(TorchMLIRPythonModules torch_mlir_custom_op_example)
-
 if(TORCH_MLIR_ENABLE_LTC)
   # Add Torch-MLIR LTC backend as dependency
   add_dependencies(TorchMLIRPythonModules torch_mlir_ltc_backend)

@@ -1,5 +1,5 @@
 # Setup PyTorch
-list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../dialects/torch/importer/jit_ir/cmake/modules")
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules")
 include(TorchMLIRPyTorch)
 TorchMLIRProbeForPyTorchInstall()
 find_package(Torch 1.8 REQUIRED)

@@ -1173,10 +1173,6 @@ def aten〇linalg_vector_norm(self: List[int], ord: float = 2, dim: Optional[Lis
         dim = list(range(len(self)))
     return upstream_shape_functions.mean_dim(self, dim, keepdim, dtype)

-# TODO: Re-enable after MacOS support is fixed for the extension.
-#def _torch_mlir_custom_op_example〇identity(t: List[int]) -> List[int]:
-#    return upstream_shape_functions.unary(t)
-
 # ==============================================================================
 # Shape library generator main().
 # ==============================================================================
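
The lines removed above are the shape-function pattern that an out-of-tree extension now supplies itself, via the --pytorch_op_extensions plumbing re-enabled in the scripts. As a hedged sketch with a hypothetical op name, reusing the List and upstream_shape_functions imports already present in this file, an extension's shape function for an elementwise op would mirror the deleted example:

# Hypothetical shape function for an extension op my_namespace::identity;
# the 〇 separator in the function name stands in for the :: of the op name,
# following the convention of the surrounding shape functions.
def my_namespace〇identity(t: List[int]) -> List[int]:
    # An identity-like op produces a tensor with the same shape as its input.
    return upstream_shape_functions.unary(t)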

@@ -633,16 +633,6 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
         "quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)",
         traits=["HasValueSemantics"])

-    # ==========================================================================
-    # `_torch_mlir_custom_op_example::` namespace.
-    #
-    # This is a demonstration of supporting an operation defined in a PyTorch
-    # extension.
-    # ==========================================================================
-
-    # TODO: Re-enable after MacOS support is fixed for the extension.
-    #emit("_torch_mlir_custom_op_example::identity : (Tensor) -> (Tensor)")

 def dump_registered_ops(outfile: TextIO, registry: Registry):
     for _, v in sorted(registry.by_unique_key.items()):
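
The registration removed above used the same emit() helper as the in-tree ops. An op supplied through --pytorch_op_extensions no longer needs an in-tree entry, but for reference, an equivalent call (hypothetical namespace, placed inside emit_ops() next to the other registrations) would read:

    # Hypothetical: the signature string follows the
    # "<namespace>::<op> : (arg types) -> (result types)" format used by the
    # other emit() calls in emit_ops().
    emit("my_namespace::identity : (Tensor) -> (Tensor)")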

@@ -51,5 +51,3 @@ def register_all_tests():
     from . import return_types
     from . import control_flow
     from . import stats
-    # TODO: Re-enable after MacOS support is fixed for the extension.
-    #from . import custom_op_example