Fixes a few issues found when debugging powderluv's setup.

* It is optional to link against Python3_LIBRARIES. Check whether they exist for this configuration and skip the link when they don't.
* The clean and auditwheel steps need to operate on sanitized package names, i.e. "torch_mlir" rather than "torch-mlir" (see the sketch after this list).
* Adds a pyproject.toml file that pins the build dependencies needed to detect both Torch and Python (the MLIR Python build was failing detection because NumPy wasn't in the pip venv).
* Commented out auditwheel: these wheels are not PyPI compliant since they weakly link to libtorch at runtime. However, they should be fine to deploy to users.
* Adds --extra-index-url to the pip wheel command, allowing PyTorch to be found.
* Hacks setup.py to remove the _mlir_libs directory before building, which keeps binaries from back-to-back builds for different Python versions from accumulating in subsequent wheels. IREE has a more principled way of doing this, but what is here should work.
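For context on the name sanitization (a minimal sketch, not the helper the build script actually uses): PEP 427 escapes runs of non-alphanumeric characters in a distribution name to a single underscore when forming wheel filenames, so cleanup globs and auditwheel invocations must use "torch_mlir" even though the project is published as "torch-mlir".

    # Minimal sketch of PEP 427 wheel-filename escaping (illustrative helper only).
    import re

    def escape_wheel_name(name: str) -> str:
        # Replace runs of characters outside [A-Za-z0-9._] with a single "_".
        return re.sub(r"[^\w\d.]+", "_", name)

    print(escape_wheel_name("torch-mlir"))  # -> torch_mlir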
manylinux
Stella Laurenzo 2022-04-20 22:09:45 -07:00
parent e13911ad75
commit c39a90e6e9
4 changed files with 56 additions and 9 deletions


@@ -81,9 +81,9 @@ function run_in_docker() {
     echo ":::: Python version $(python --version)"
     case "$package" in
       torch-mlir)
-        clean_wheels torch-mlir $python_version
+        clean_wheels torch_mlir $python_version
         build_torch_mlir
-        run_audit_wheel torch-mlir $python_version
+        #run_audit_wheel torch_mlir $python_version
         ;;
       *)
         echo "Unrecognized package '$package'"
@@ -95,10 +95,9 @@
 }

 function build_torch_mlir() {
-  python -m pip install --pre torch torchvision --extra-index-url https://download.pytorch.org/whl/nightly/cpu
-  python -m pip install -r /main_checkout/torch-mlir/requirements.txt
   CMAKE_GENERATOR=Ninja \
-  python -m pip wheel -v -w /wheelhouse /main_checkout/torch-mlir/
+  python -m pip wheel -v -w /wheelhouse /main_checkout/torch-mlir/ \
+    --extra-index-url https://download.pytorch.org/whl/nightly/cpu
 }

 function run_audit_wheel() {
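A note on the --extra-index-url change above: pip wheel builds with PEP 517 build isolation, so the build requirements listed in pyproject.toml (including the pinned torch nightly) are resolved with the index options passed to the pip wheel command itself; the PyTorch nightly index therefore has to be supplied there. A hypothetical pre-flight check, not part of the build script, with an illustrative pin and download directory:

    # Confirm the torch nightly pinned in pyproject.toml is resolvable from the
    # extra index before starting the long isolated wheel build (hypothetical).
    import subprocess
    import sys

    PIN = "torch==1.12.0.dev20220419+cpu"  # keep in sync with pyproject.toml
    subprocess.check_call([
        sys.executable, "-m", "pip", "download", "--no-deps", "--pre",
        "--dest", "/tmp/pin-check", PIN,
        "--extra-index-url", "https://download.pytorch.org/whl/nightly/cpu",
    ])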

pyproject.toml (new file, mode 100644, 21 lines)

@@ -0,0 +1,21 @@
+[build-system]
+requires = [
+    "setuptools>=42",
+    "wheel",
+    # There is no fundamental reason to pin this CMake version, beyond
+    # build stability.
+    "cmake==3.22.2",
+    "ninja==1.10.2",
+    "packaging",
+    # Version 2.7.0 excluded: https://github.com/pybind/pybind11/issues/3136
+    "pybind11>=2.6.0,!=2.7.0",
+    "PyYAML",
+    # The torch-mlir CMake build requires numpy and torch to be installed.
+    # Further, the setup.py will pin the version selected here into built
+    # artifacts.
+    # TODO: Come up with a better way to pin the version.
+    "numpy",
+    "torch==1.12.0.dev20220419+cpu",
+]
+build-backend = "setuptools.build_meta"
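Since setup.py re-pins install_requires to whatever torch version is importable at build time, the pin above should match the torch in the build environment. A hypothetical consistency check, not part of this commit (assumes pyproject.toml is in the current directory and torch is installed):

    # Compare the pyproject.toml torch pin against the importable torch version.
    import re
    from pathlib import Path

    import torch

    text = Path("pyproject.toml").read_text()
    match = re.search(r'"torch==([^"]+)"', text)
    pinned = match.group(1) if match else None
    print(f"pyproject pin: {pinned}, installed: {torch.__version__}")
    if pinned and pinned != torch.__version__:
        raise SystemExit("pyproject.toml torch pin is out of sync with this environment")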


@@ -24,10 +24,19 @@ add_library(TorchMLIRJITIRImporter MODULE
 target_link_libraries(TorchMLIRJITIRImporter
   TorchMLIRAggregateCAPI
   ${TORCH_LIBRARIES}
-  ${Python3_LIBRARIES}
   torch_python
 )
+# On static Python builds, there may not be Python libraries to link against
+# (they will late bind at runtime from the executable). We have to condition
+# this because in that case it is set to NOTFOUND and CMake will consider
+# this an error.
+if(Python3_LIBRARIES)
+  target_link_libraries(TorchMLIRJITIRImporter
+    ${Python3_LIBRARIES}
+  )
+endif()
+
 message(STATUS "TORCH_CXXFLAGS=${TORCH_CXXFLAGS}")
 set_target_properties(TorchMLIRJITIRImporter PROPERTIES
   LIBRARY_OUTPUT_DIRECTORY "${TORCH_MLIR_PYTHON_PACKAGES_DIR}/torch_mlir/torch_mlir/_mlir_libs"


@@ -61,6 +61,10 @@ class CMakeBuild(build_py):
         if not cmake_build_dir:
             cmake_build_dir = os.path.abspath(
                 os.path.join(target_dir, "..", "cmake_build"))
+        python_package_dir = os.path.join(cmake_build_dir,
+                                          "tools", "torch-mlir", "python_packages",
+                                          "torch_mlir")
+
         if not os.getenv("TORCH_MLIR_CMAKE_BUILD_DIR_ALREADY_BUILT"):
             src_dir = os.path.abspath(os.path.dirname(__file__))
             llvm_dir = os.path.join(
@@ -83,15 +87,26 @@ class CMakeBuild(build_py):
             cmake_cache_file = os.path.join(cmake_build_dir, "CMakeCache.txt")
             if os.path.exists(cmake_cache_file):
                 os.remove(cmake_cache_file)
+            # NOTE: With repeated builds for different Python versions, the
+            # prior version binaries will continue to accumulate. IREE uses
+            # a separate install step and cleans the install directory to
+            # keep this from happening. That is the most robust. Here we just
+            # delete the directory where we build native extensions to keep
+            # this from happening but still take advantage of most of the
+            # build cache.
+            mlir_libs_dir = os.path.join(python_package_dir, "torch_mlir", "_mlir_libs")
+            if os.path.exists(mlir_libs_dir):
+                print(f"Removing _mlir_libs dir to force rebuild: {mlir_libs_dir}")
+                shutil.rmtree(mlir_libs_dir)
+            else:
+                print(f"Not removing _mlir_libs dir (does not exist): {mlir_libs_dir}")
+
             subprocess.check_call(["cmake", llvm_dir] +
                                   cmake_args, cwd=cmake_build_dir)
             subprocess.check_call(["cmake",
                                    "--build", ".",
                                    "--target", "TorchMLIRPythonModules"],
                                   cwd=cmake_build_dir)

-        python_package_dir = os.path.join(cmake_build_dir,
-                                          "tools", "torch-mlir", "python_packages",
-                                          "torch_mlir")
         if os.path.exists(target_dir):
             shutil.rmtree(target_dir, ignore_errors=False, onerror=None)
@@ -131,8 +146,11 @@
         CMakeExtension("torch_mlir._mlir_libs._jit_ir_importer"),
     ],
     install_requires=[
         "numpy",
+        # To avoid issues with drift for each nightly build, we pin to the
+        # exact version we built against.
+        # TODO: This includes the +cpu specifier which is overly
+        # restrictive and a bit unfortunate.
         f"torch=={torch.__version__}",
     ],
     zip_safe=False,
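As a follow-up sanity check (a hypothetical snippet, not part of this change), the exact pin that setup.py bakes into a built wheel can be read back from its metadata; the wheel path below is a placeholder:

    # Print Requires-Dist entries from a built wheel to confirm the embedded torch pin.
    import zipfile

    wheel_path = "/wheelhouse/torch_mlir-0.0.1-cp39-cp39-linux_x86_64.whl"  # placeholder
    with zipfile.ZipFile(wheel_path) as wf:
        metadata = next(n for n in wf.namelist() if n.endswith(".dist-info/METADATA"))
        for line in wf.read(metadata).decode("utf-8").splitlines():
            if line.startswith("Requires-Dist:"):
                print(line)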