From 7301aa80fd38ed30a7a9b2006b4af1c1e2557eea Mon Sep 17 00:00:00 2001 From: Stella Laurenzo Date: Tue, 30 Jan 2024 23:33:21 -0800 Subject: [PATCH] Enable -Werror in lib/ and LTC. (#2841) Required some massaging of LTC to make it warning clean, and I had to manually disable some warnings on the generated source files (which we don't control). The project is warning clean now. The `-Werror` flag is disabled by default as we can't control everywhere people will try to build/install. The CI enables it via -DTORCH_MLIR_ENABLE_WERROR_FLAG=ON. --- CMakeLists.txt | 9 +++++ build_tools/ci/build_posix.sh | 1 + include/torch-mlir-c/Registration.h | 2 +- include/torch-mlir-c/TorchTypes.h | 40 +++++++++---------- lib/CMakeLists.txt | 2 + .../TorchOnnxToTorch/DefaultDomainQtoZ.cpp | 2 +- .../ltc/csrc/base_lazy_backend/CMakeLists.txt | 10 +++++ .../ltc/csrc/base_lazy_backend/dynamic_ir.cpp | 8 ++-- .../base_lazy_backend/mlir_lowering_context.h | 9 ++--- .../mlir_native_functions.cpp | 8 ++-- .../csrc/base_lazy_backend/utils/sys_utils.h | 6 +-- 11 files changed, 59 insertions(+), 38 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 376aea80e..44f02ac6a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,6 +31,7 @@ include(CMakeDependentOption) # Project options #------------------------------------------------------------------------------- +option(TORCH_MLIR_ENABLE_WERROR_FLAG "Enable `-Werror` flag on supported directories, treat warnings as errors" OFF) option(TORCH_MLIR_USE_INSTALLED_PYTORCH "If depending on PyTorch use it as installed in the current Python environment" ON) option(TORCH_MLIR_ENABLE_REFBACKEND "Enable reference backend" ON) @@ -53,6 +54,14 @@ cmake_dependent_option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" OFF TORCH_MLI option(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER "Enables the ONNX C importer" OFF) +macro(torch_mlir_enable_werror) + if(TORCH_MLIR_ENABLE_WERROR_FLAG) + if(NOT MSVC) + add_compile_options(-Werror) + endif() + endif() +endmacro() + 
#------------------------------------------------------------------------------- # Configure out-of-tree vs in-tree build #------------------------------------------------------------------------------- diff --git a/build_tools/ci/build_posix.sh b/build_tools/ci/build_posix.sh index 438a55c74..fec5e252e 100755 --- a/build_tools/ci/build_posix.sh +++ b/build_tools/ci/build_posix.sh @@ -42,6 +42,7 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \ -DCMAKE_BUILD_TYPE=Release \ -DPython3_EXECUTABLE="$(which python)" \ -DLLVM_ENABLE_ASSERTIONS=ON \ + -DTORCH_MLIR_ENABLE_WERROR_FLAG=ON \ -DCMAKE_INSTALL_PREFIX="$install_dir" \ -DCMAKE_INSTALL_LIBDIR=lib \ -DLLVM_ENABLE_PROJECTS=mlir \ diff --git a/include/torch-mlir-c/Registration.h b/include/torch-mlir-c/Registration.h index 4d582e61f..7d607693d 100644 --- a/include/torch-mlir-c/Registration.h +++ b/include/torch-mlir-c/Registration.h @@ -23,7 +23,7 @@ extern "C" { MLIR_CAPI_EXPORTED void torchMlirRegisterAllDialects(MlirContext context); /** Registers all passes for symbolic access with the global registry. */ -MLIR_CAPI_EXPORTED void torchMlirRegisterAllPasses(); +MLIR_CAPI_EXPORTED void torchMlirRegisterAllPasses(void); #ifdef __cplusplus } diff --git a/include/torch-mlir-c/TorchTypes.h b/include/torch-mlir-c/TorchTypes.h index c852dd613..b214e147d 100644 --- a/include/torch-mlir-c/TorchTypes.h +++ b/include/torch-mlir-c/TorchTypes.h @@ -35,7 +35,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchNnModuleTypeGet(MlirContext context, MlirStringRef className); /// Gets the !torch.nn.Module typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNnModuleTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNnModuleTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.optional type. @@ -53,7 +53,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchOptionalTypeGetContained(MlirType containedType); /// Gets the !torch.optional typeid. 
-MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchOptionalTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchOptionalTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.tuple type. @@ -75,7 +75,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchTupleTypeGetType(MlirType t, intptr_t pos); /// Gets the !torch.tuple typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchTupleTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchTupleTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.union type. @@ -97,7 +97,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchUnionTypeGetType(MlirType t, intptr_t pos); /// Gets the !torch.union typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchUnionTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchUnionTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.list type. @@ -113,7 +113,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchListTypeGet(MlirType containedType); MLIR_CAPI_EXPORTED MlirType torchMlirTorchListTypeGetContainedType(MlirType t); /// Gets the !torch.list typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchListTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchListTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.Device type. @@ -126,7 +126,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchDevice(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchDeviceTypeGet(MlirContext context); /// Gets the !torch.device typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDeviceTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDeviceTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.Generator type. 
@@ -139,7 +139,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchGenerator(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchGeneratorTypeGet(MlirContext context); /// Gets the !torch.generator typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchGeneratorTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchGeneratorTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.bool type. @@ -152,7 +152,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchBool(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchBoolTypeGet(MlirContext context); /// Gets the !torch.bool typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchBoolTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchBoolTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.int type. @@ -165,7 +165,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchInt(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchIntTypeGet(MlirContext context); /// Gets the !torch.int typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchIntTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchIntTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.float type. @@ -178,7 +178,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchFloat(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchFloatTypeGet(MlirContext context); /// Gets the !torch.float typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchFloatTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchFloatTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.LinearParams type. @@ -192,7 +192,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchLinearParamsTypeGet(MlirContext context); /// Gets the !torch.linearparams typeid. 
-MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchLinearParamsTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchLinearParamsTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.qint8 type. @@ -205,7 +205,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchQInt8(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchQInt8TypeGet(MlirContext context); /// Gets the !torch.qint8 typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQInt8TypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQInt8TypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.quint8 type. @@ -218,7 +218,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchQUInt8(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchQUInt8TypeGet(MlirContext context); /// Gets the !torch.quint8 typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQUInt8TypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQUInt8TypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.tensor type. @@ -266,7 +266,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchNonValueTensorTypeGetDtype(MlirType t); /// Gets the !torch.tensor typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNonValueTensorTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNonValueTensorTypeGetTypeID(void); //===----------------------------------------------------------------------===// // torch.vtensor type. @@ -312,7 +312,7 @@ torchMlirTorchValueTensorTypeGetSizes(MlirType t, int64_t *sizes); MLIR_CAPI_EXPORTED MlirType torchMlirTorchValueTensorTypeGetDtype(MlirType t); /// Gets the !torch.vtensor typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchValueTensorTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchValueTensorTypeGetTypeID(void); //===----------------------------------------------------------------------===// // !torch.none type. 
@@ -325,7 +325,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchNone(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchNoneTypeGet(MlirContext context); /// Gets the !torch.none typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNoneTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNoneTypeGetTypeID(void); //===----------------------------------------------------------------------===// // !torch.str type. @@ -338,7 +338,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchString(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchStringTypeGet(MlirContext context); /// Gets the !torch.str typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchStringTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchStringTypeGetTypeID(void); //===----------------------------------------------------------------------===// // !torch.any type. @@ -351,7 +351,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchAny(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchAnyTypeGet(MlirContext context); /// Gets the !torch.any typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchAnyTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchAnyTypeGetTypeID(void); //===----------------------------------------------------------------------===// // !torch.number type. @@ -364,7 +364,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchNumber(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchNumberTypeGet(MlirContext context); /// Gets the !torch.number typeid. -MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNumberTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNumberTypeGetTypeID(void); //===----------------------------------------------------------------------===// // !torch.dict type. @@ -387,7 +387,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchDictTypeGetKeyType(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchDictTypeGetValueType(MlirType t); /// Gets the !torch.dict typeid. 
-MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDictTypeGetTypeID(); +MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDictTypeGetTypeID(void); #ifdef __cplusplus } diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index d9030c23a..0db753e47 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -1,3 +1,5 @@ +torch_mlir_enable_werror() + add_subdirectory(CAPI) add_subdirectory(Conversion) add_subdirectory(Dialect) diff --git a/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp b/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp index ff0bcd524..d54c1e1b9 100644 --- a/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp +++ b/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp @@ -673,7 +673,7 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ( if (operands.size() == 1) { if (noop_with_empty_axes == 0) { MLIRContext *context = binder.op->getContext(); - auto rank = + int rank = data.getType().cast().getSizes().size(); SmallVector dims; for (int i = 0; i < rank; i++) { diff --git a/projects/ltc/csrc/base_lazy_backend/CMakeLists.txt b/projects/ltc/csrc/base_lazy_backend/CMakeLists.txt index eee3044f0..2bbdbd233 100644 --- a/projects/ltc/csrc/base_lazy_backend/CMakeLists.txt +++ b/projects/ltc/csrc/base_lazy_backend/CMakeLists.txt @@ -2,11 +2,21 @@ # Setup PyTorch/LTC #------------------------------------------------------------------------------- +torch_mlir_enable_werror() + set(LTC_GENERATED generated/LazyNativeFunctions.cpp generated/RegisterLazy.cpp generated/shape_inference.cpp ) + +# The auto generated files trigger some warnings we can't do anything about. 
+if(NOT MSVC) + set_source_files_properties(${LTC_GENERATED} + PROPERTIES COMPILE_FLAGS "-Wno-sign-compare -Wno-unused-function" + ) +endif() + set(LTC_BACKEND_DEPENDS mlir_lowering_context.cpp mlir_native_functions.cpp diff --git a/projects/ltc/csrc/base_lazy_backend/dynamic_ir.cpp b/projects/ltc/csrc/base_lazy_backend/dynamic_ir.cpp index c11c1563b..363bac959 100644 --- a/projects/ltc/csrc/base_lazy_backend/dynamic_ir.cpp +++ b/projects/ltc/csrc/base_lazy_backend/dynamic_ir.cpp @@ -24,7 +24,7 @@ std::string DimensionNode::ToString() const { return "DimensionNode"; } SizeNode::SizeNode(Value input, size_t dim) : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::size")}, {input}, MHash(dim)), - dim_(dim){}; + dim_(dim) {} int64_t SizeNode::getStaticValue() const { return dynamic_cast(operand(0).node) @@ -35,7 +35,7 @@ int64_t SizeNode::getStaticValue() const { std::string SizeNode::ToString() const { return "SizeNode"; } SizeAdd::SizeAdd(Value a, Value b) - : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::add")}, {a, b}){}; + : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::add")}, {a, b}) {} int64_t SizeAdd::getStaticValue() const { return dynamic_cast(operand(0).node) @@ -46,7 +46,7 @@ int64_t SizeAdd::getStaticValue() const { std::string SizeAdd::ToString() const { return "SizeAdd"; } SizeMul::SizeMul(Value a, Value b) - : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::mul")}, {a, b}){}; + : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::mul")}, {a, b}) {} int64_t SizeMul::getStaticValue() const { return dynamic_cast(operand(0).node) @@ -57,7 +57,7 @@ int64_t SizeMul::getStaticValue() const { std::string SizeMul::ToString() const { return "SizeMul"; } SizeDiv::SizeDiv(Value a, Value b) - : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::div")}, {a, b}){}; + : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::div")}, {a, b}) {} int64_t SizeDiv::getStaticValue() const { TORCH_CHECK( diff --git 
a/projects/ltc/csrc/base_lazy_backend/mlir_lowering_context.h b/projects/ltc/csrc/base_lazy_backend/mlir_lowering_context.h index 3b226b468..e69820535 100644 --- a/projects/ltc/csrc/base_lazy_backend/mlir_lowering_context.h +++ b/projects/ltc/csrc/base_lazy_backend/mlir_lowering_context.h @@ -150,15 +150,14 @@ public: protected: size_t num_parameters_; - std::unordered_map parameters_map_; - std::vector parameter_names_; - std::vector parameter_shapes_; - Shape result_shape_; - MlirModule module_op_; MlirContext mlir_context_; std::shared_ptr graph_; InputOutputAliases input_output_aliases_; + std::unordered_map parameters_map_; + std::vector parameter_names_; + std::vector parameter_shapes_; + Shape result_shape_; }; } // namespace lazy diff --git a/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp b/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp index a0e4bae76..af680f224 100644 --- a/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp +++ b/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp @@ -67,7 +67,7 @@ c10::optional to_meta(const c10::optional &tensor) { return c10::nullopt; } -std::vector to_meta(at::ITensorListRef t_list) { +[[maybe_unused]] std::vector to_meta(at::ITensorListRef t_list) { std::vector outs; outs.reserve(t_list.size()); for (const auto &tensor : t_list) { @@ -92,7 +92,7 @@ namespace lazy { namespace { -at::Tensor +[[maybe_unused]] at::Tensor CreateLtcTensor(const at::Tensor &tensor, const c10::optional &device) { if (tensor.defined() && device) { @@ -102,7 +102,7 @@ CreateLtcTensor(const at::Tensor &tensor, return tensor; } -c10::optional +[[maybe_unused]] c10::optional GetLtcDevice(const c10::optional &device) { if (!device) { return c10::nullopt; @@ -334,7 +334,7 @@ at::Tensor LazyNativeFunctions::_to_copy( std::move(node), lazy_self->GetDevice())); return result; } -}; +} at::Tensor LazyNativeFunctions::_unsafe_view(const at::Tensor &self, at::IntArrayRef size) { diff --git 
a/projects/ltc/csrc/base_lazy_backend/utils/sys_utils.h b/projects/ltc/csrc/base_lazy_backend/utils/sys_utils.h index f6c51ba61..5804bce5f 100644 --- a/projects/ltc/csrc/base_lazy_backend/utils/sys_utils.h +++ b/projects/ltc/csrc/base_lazy_backend/utils/sys_utils.h @@ -14,8 +14,8 @@ static T GetEnv(const std::string &name, const T &default_value = T(0)) { return T(std::atoi(env)); } -static std::string GetEnvString(const std::string &name, - const std::string &default_value) { +[[maybe_unused]] static std::string +GetEnvString(const std::string &name, const std::string &default_value) { const char *env = std::getenv(name.c_str()); if (!env) { return default_value; @@ -23,7 +23,7 @@ static std::string GetEnvString(const std::string &name, return std::string(env); } -static bool GetEnvBool(const char *name, bool defval) { +[[maybe_unused]] static bool GetEnvBool(const char *name, bool defval) { const char *env = std::getenv(name); if (env == nullptr) { return defval;