Enable -Werror in lib/ and LTC. (#2841)

Required some massaging of LTC to make it warning clean, and I had to
manually disable some warnings on the generated source files (which we
don't control).

The project is warning clean now.

The `-Werror` flag is disabled by default as we can't control everywhere
people will try to build/install. The CI enables it via
-DTORCH_MLIR_ENABLE_WERROR_FLAG=ON.
pull/2847/head
Stella Laurenzo 2024-01-30 23:33:21 -08:00 committed by GitHub
parent 943164d797
commit 7301aa80fd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 59 additions and 38 deletions

View File

@ -31,6 +31,7 @@ include(CMakeDependentOption)
# Project options # Project options
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------------
option(TORCH_MLIR_ENABLE_WERROR_FLAG "Enable `-Werror` flag on supported directories, treat warnings as errors" OFF)
option(TORCH_MLIR_USE_INSTALLED_PYTORCH "If depending on PyTorch use it as installed in the current Python environment" ON) option(TORCH_MLIR_USE_INSTALLED_PYTORCH "If depending on PyTorch use it as installed in the current Python environment" ON)
option(TORCH_MLIR_ENABLE_REFBACKEND "Enable reference backend" ON) option(TORCH_MLIR_ENABLE_REFBACKEND "Enable reference backend" ON)
@ -53,6 +54,14 @@ cmake_dependent_option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" OFF TORCH_MLI
option(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER "Enables the ONNX C importer" OFF) option(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER "Enables the ONNX C importer" OFF)
macro(torch_mlir_enable_werror)
if(TORCH_MLIR_ENABLE_WERROR_FLAG)
if(NOT MSVC)
add_compile_options(-Werror)
endif()
endif()
endmacro()
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------------
# Configure out-of-tree vs in-tree build # Configure out-of-tree vs in-tree build
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------------

View File

@ -42,6 +42,7 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \
-DCMAKE_BUILD_TYPE=Release \ -DCMAKE_BUILD_TYPE=Release \
-DPython3_EXECUTABLE="$(which python)" \ -DPython3_EXECUTABLE="$(which python)" \
-DLLVM_ENABLE_ASSERTIONS=ON \ -DLLVM_ENABLE_ASSERTIONS=ON \
-DTORCH_MLIR_ENABLE_WERROR_FLAG=ON \
-DCMAKE_INSTALL_PREFIX="$install_dir" \ -DCMAKE_INSTALL_PREFIX="$install_dir" \
-DCMAKE_INSTALL_LIBDIR=lib \ -DCMAKE_INSTALL_LIBDIR=lib \
-DLLVM_ENABLE_PROJECTS=mlir \ -DLLVM_ENABLE_PROJECTS=mlir \

View File

@ -23,7 +23,7 @@ extern "C" {
MLIR_CAPI_EXPORTED void torchMlirRegisterAllDialects(MlirContext context); MLIR_CAPI_EXPORTED void torchMlirRegisterAllDialects(MlirContext context);
/** Registers all passes for symbolic access with the global registry. */ /** Registers all passes for symbolic access with the global registry. */
MLIR_CAPI_EXPORTED void torchMlirRegisterAllPasses(); MLIR_CAPI_EXPORTED void torchMlirRegisterAllPasses(void);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -35,7 +35,7 @@ MLIR_CAPI_EXPORTED MlirType
torchMlirTorchNnModuleTypeGet(MlirContext context, MlirStringRef className); torchMlirTorchNnModuleTypeGet(MlirContext context, MlirStringRef className);
/// Gets the !torch.nn.Module typeid. /// Gets the !torch.nn.Module typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNnModuleTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNnModuleTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.optional type. // torch.optional type.
@ -53,7 +53,7 @@ MLIR_CAPI_EXPORTED MlirType
torchMlirTorchOptionalTypeGetContained(MlirType containedType); torchMlirTorchOptionalTypeGetContained(MlirType containedType);
/// Gets the !torch.optional typeid. /// Gets the !torch.optional typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchOptionalTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchOptionalTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.tuple<T1, T2, T3> type. // torch.tuple<T1, T2, T3> type.
@ -75,7 +75,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchTupleTypeGetType(MlirType t,
intptr_t pos); intptr_t pos);
/// Gets the !torch.tuple typeid. /// Gets the !torch.tuple typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchTupleTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchTupleTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.union<T1, T2, T3> type. // torch.union<T1, T2, T3> type.
@ -97,7 +97,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchUnionTypeGetType(MlirType t,
intptr_t pos); intptr_t pos);
/// Gets the !torch.union typeid. /// Gets the !torch.union typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchUnionTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchUnionTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.list<T> type. // torch.list<T> type.
@ -113,7 +113,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchListTypeGet(MlirType containedType);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchListTypeGetContainedType(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchListTypeGetContainedType(MlirType t);
/// Gets the !torch.list typeid. /// Gets the !torch.list typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchListTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchListTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.Device type. // torch.Device type.
@ -126,7 +126,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchDevice(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchDeviceTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchDeviceTypeGet(MlirContext context);
/// Gets the !torch.device typeid. /// Gets the !torch.device typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDeviceTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDeviceTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.Generator type. // torch.Generator type.
@ -139,7 +139,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchGenerator(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchGeneratorTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchGeneratorTypeGet(MlirContext context);
/// Gets the !torch.generator typeid. /// Gets the !torch.generator typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchGeneratorTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchGeneratorTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.bool type. // torch.bool type.
@ -152,7 +152,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchBool(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchBoolTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchBoolTypeGet(MlirContext context);
/// Gets the !torch.bool typeid. /// Gets the !torch.bool typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchBoolTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchBoolTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.int type. // torch.int type.
@ -165,7 +165,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchInt(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchIntTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchIntTypeGet(MlirContext context);
/// Gets the !torch.int typeid. /// Gets the !torch.int typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchIntTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchIntTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.float type. // torch.float type.
@ -178,7 +178,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchFloat(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchFloatTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchFloatTypeGet(MlirContext context);
/// Gets the !torch.float typeid. /// Gets the !torch.float typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchFloatTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchFloatTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.LinearParams type. // torch.LinearParams type.
@ -192,7 +192,7 @@ MLIR_CAPI_EXPORTED MlirType
torchMlirTorchLinearParamsTypeGet(MlirContext context); torchMlirTorchLinearParamsTypeGet(MlirContext context);
/// Gets the !torch.linearparams typeid. /// Gets the !torch.linearparams typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchLinearParamsTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchLinearParamsTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.qint8 type. // torch.qint8 type.
@ -205,7 +205,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchQInt8(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchQInt8TypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchQInt8TypeGet(MlirContext context);
/// Gets the !torch.qint8 typeid. /// Gets the !torch.qint8 typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQInt8TypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQInt8TypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.quint8 type. // torch.quint8 type.
@ -218,7 +218,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchQUInt8(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchQUInt8TypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchQUInt8TypeGet(MlirContext context);
/// Gets the !torch.quint8 typeid. /// Gets the !torch.quint8 typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQUInt8TypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchQUInt8TypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.tensor type. // torch.tensor type.
@ -266,7 +266,7 @@ MLIR_CAPI_EXPORTED MlirType
torchMlirTorchNonValueTensorTypeGetDtype(MlirType t); torchMlirTorchNonValueTensorTypeGetDtype(MlirType t);
/// Gets the !torch.tensor typeid. /// Gets the !torch.tensor typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNonValueTensorTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNonValueTensorTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// torch.vtensor type. // torch.vtensor type.
@ -312,7 +312,7 @@ torchMlirTorchValueTensorTypeGetSizes(MlirType t, int64_t *sizes);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchValueTensorTypeGetDtype(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchValueTensorTypeGetDtype(MlirType t);
/// Gets the !torch.vtensor typeid. /// Gets the !torch.vtensor typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchValueTensorTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchValueTensorTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// !torch.none type. // !torch.none type.
@ -325,7 +325,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchNone(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchNoneTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchNoneTypeGet(MlirContext context);
/// Gets the !torch.none typeid. /// Gets the !torch.none typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNoneTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNoneTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// !torch.str type. // !torch.str type.
@ -338,7 +338,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchString(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchStringTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchStringTypeGet(MlirContext context);
/// Gets the !torch.str typeid. /// Gets the !torch.str typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchStringTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchStringTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// !torch.any type. // !torch.any type.
@ -351,7 +351,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchAny(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchAnyTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchAnyTypeGet(MlirContext context);
/// Gets the !torch.any typeid. /// Gets the !torch.any typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchAnyTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchAnyTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// !torch.number type. // !torch.number type.
@ -364,7 +364,7 @@ MLIR_CAPI_EXPORTED bool torchMlirTypeIsATorchNumber(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchNumberTypeGet(MlirContext context); MLIR_CAPI_EXPORTED MlirType torchMlirTorchNumberTypeGet(MlirContext context);
/// Gets the !torch.number typeid. /// Gets the !torch.number typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNumberTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchNumberTypeGetTypeID(void);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// !torch.dict type. // !torch.dict type.
@ -387,7 +387,7 @@ MLIR_CAPI_EXPORTED MlirType torchMlirTorchDictTypeGetKeyType(MlirType t);
MLIR_CAPI_EXPORTED MlirType torchMlirTorchDictTypeGetValueType(MlirType t); MLIR_CAPI_EXPORTED MlirType torchMlirTorchDictTypeGetValueType(MlirType t);
/// Gets the !torch.dict typeid. /// Gets the !torch.dict typeid.
MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDictTypeGetTypeID(); MLIR_CAPI_EXPORTED MlirTypeID torchMlirTorchDictTypeGetTypeID(void);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -1,3 +1,5 @@
torch_mlir_enable_werror()
add_subdirectory(CAPI) add_subdirectory(CAPI)
add_subdirectory(Conversion) add_subdirectory(Conversion)
add_subdirectory(Dialect) add_subdirectory(Dialect)

View File

@ -673,7 +673,7 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
if (operands.size() == 1) { if (operands.size() == 1) {
if (noop_with_empty_axes == 0) { if (noop_with_empty_axes == 0) {
MLIRContext *context = binder.op->getContext(); MLIRContext *context = binder.op->getContext();
auto rank = int rank =
data.getType().cast<Torch::ValueTensorType>().getSizes().size(); data.getType().cast<Torch::ValueTensorType>().getSizes().size();
SmallVector<Value, 1> dims; SmallVector<Value, 1> dims;
for (int i = 0; i < rank; i++) { for (int i = 0; i < rank; i++) {

View File

@ -2,11 +2,21 @@
# Setup PyTorch/LTC # Setup PyTorch/LTC
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------------
torch_mlir_enable_werror()
set(LTC_GENERATED set(LTC_GENERATED
generated/LazyNativeFunctions.cpp generated/LazyNativeFunctions.cpp
generated/RegisterLazy.cpp generated/RegisterLazy.cpp
generated/shape_inference.cpp generated/shape_inference.cpp
) )
# The auto generated files trigger some warnings we can't do anything about.
if(NOT MSVC)
set_source_files_properties(${LTC_GENERATED}
PROPERTIES COMPILE_FLAGS "-Wno-sign-compare -Wno-unused-function"
)
endif()
set(LTC_BACKEND_DEPENDS set(LTC_BACKEND_DEPENDS
mlir_lowering_context.cpp mlir_lowering_context.cpp
mlir_native_functions.cpp mlir_native_functions.cpp

View File

@ -24,7 +24,7 @@ std::string DimensionNode::ToString() const { return "DimensionNode"; }
SizeNode::SizeNode(Value input, size_t dim) SizeNode::SizeNode(Value input, size_t dim)
: DimensionNode(OpKind{c10::Symbol::fromQualString("aten::size")}, {input}, : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::size")}, {input},
MHash(dim)), MHash(dim)),
dim_(dim){}; dim_(dim) {}
int64_t SizeNode::getStaticValue() const { int64_t SizeNode::getStaticValue() const {
return dynamic_cast<const TorchMlirNode *>(operand(0).node) return dynamic_cast<const TorchMlirNode *>(operand(0).node)
@ -35,7 +35,7 @@ int64_t SizeNode::getStaticValue() const {
std::string SizeNode::ToString() const { return "SizeNode"; } std::string SizeNode::ToString() const { return "SizeNode"; }
SizeAdd::SizeAdd(Value a, Value b) SizeAdd::SizeAdd(Value a, Value b)
: DimensionNode(OpKind{c10::Symbol::fromQualString("aten::add")}, {a, b}){}; : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::add")}, {a, b}) {}
int64_t SizeAdd::getStaticValue() const { int64_t SizeAdd::getStaticValue() const {
return dynamic_cast<const DimensionNode *>(operand(0).node) return dynamic_cast<const DimensionNode *>(operand(0).node)
@ -46,7 +46,7 @@ int64_t SizeAdd::getStaticValue() const {
std::string SizeAdd::ToString() const { return "SizeAdd"; } std::string SizeAdd::ToString() const { return "SizeAdd"; }
SizeMul::SizeMul(Value a, Value b) SizeMul::SizeMul(Value a, Value b)
: DimensionNode(OpKind{c10::Symbol::fromQualString("aten::mul")}, {a, b}){}; : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::mul")}, {a, b}) {}
int64_t SizeMul::getStaticValue() const { int64_t SizeMul::getStaticValue() const {
return dynamic_cast<const DimensionNode *>(operand(0).node) return dynamic_cast<const DimensionNode *>(operand(0).node)
@ -57,7 +57,7 @@ int64_t SizeMul::getStaticValue() const {
std::string SizeMul::ToString() const { return "SizeMul"; } std::string SizeMul::ToString() const { return "SizeMul"; }
SizeDiv::SizeDiv(Value a, Value b) SizeDiv::SizeDiv(Value a, Value b)
: DimensionNode(OpKind{c10::Symbol::fromQualString("aten::div")}, {a, b}){}; : DimensionNode(OpKind{c10::Symbol::fromQualString("aten::div")}, {a, b}) {}
int64_t SizeDiv::getStaticValue() const { int64_t SizeDiv::getStaticValue() const {
TORCH_CHECK( TORCH_CHECK(

View File

@ -150,15 +150,14 @@ public:
protected: protected:
size_t num_parameters_; size_t num_parameters_;
std::unordered_map<int, std::string> parameters_map_;
std::vector<std::string> parameter_names_;
std::vector<Shape> parameter_shapes_;
Shape result_shape_;
MlirModule module_op_; MlirModule module_op_;
MlirContext mlir_context_; MlirContext mlir_context_;
std::shared_ptr<torch::jit::Graph> graph_; std::shared_ptr<torch::jit::Graph> graph_;
InputOutputAliases input_output_aliases_; InputOutputAliases input_output_aliases_;
std::unordered_map<int, std::string> parameters_map_;
std::vector<std::string> parameter_names_;
std::vector<Shape> parameter_shapes_;
Shape result_shape_;
}; };
} // namespace lazy } // namespace lazy

View File

@ -67,7 +67,7 @@ c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor> &tensor) {
return c10::nullopt; return c10::nullopt;
} }
std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) { [[maybe_unused]] std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
std::vector<at::Tensor> outs; std::vector<at::Tensor> outs;
outs.reserve(t_list.size()); outs.reserve(t_list.size());
for (const auto &tensor : t_list) { for (const auto &tensor : t_list) {
@ -92,7 +92,7 @@ namespace lazy {
namespace { namespace {
at::Tensor [[maybe_unused]] at::Tensor
CreateLtcTensor(const at::Tensor &tensor, CreateLtcTensor(const at::Tensor &tensor,
const c10::optional<torch::lazy::BackendDevice> &device) { const c10::optional<torch::lazy::BackendDevice> &device) {
if (tensor.defined() && device) { if (tensor.defined() && device) {
@ -102,7 +102,7 @@ CreateLtcTensor(const at::Tensor &tensor,
return tensor; return tensor;
} }
c10::optional<torch::lazy::BackendDevice> [[maybe_unused]] c10::optional<torch::lazy::BackendDevice>
GetLtcDevice(const c10::optional<c10::Device> &device) { GetLtcDevice(const c10::optional<c10::Device> &device) {
if (!device) { if (!device) {
return c10::nullopt; return c10::nullopt;
@ -334,7 +334,7 @@ at::Tensor LazyNativeFunctions::_to_copy(
std::move(node), lazy_self->GetDevice())); std::move(node), lazy_self->GetDevice()));
return result; return result;
} }
}; }
at::Tensor LazyNativeFunctions::_unsafe_view(const at::Tensor &self, at::Tensor LazyNativeFunctions::_unsafe_view(const at::Tensor &self,
at::IntArrayRef size) { at::IntArrayRef size) {

View File

@ -14,8 +14,8 @@ static T GetEnv(const std::string &name, const T &default_value = T(0)) {
return T(std::atoi(env)); return T(std::atoi(env));
} }
static std::string GetEnvString(const std::string &name, [[maybe_unused]] static std::string
const std::string &default_value) { GetEnvString(const std::string &name, const std::string &default_value) {
const char *env = std::getenv(name.c_str()); const char *env = std::getenv(name.c_str());
if (!env) { if (!env) {
return default_value; return default_value;
@ -23,7 +23,7 @@ static std::string GetEnvString(const std::string &name,
return std::string(env); return std::string(env);
} }
static bool GetEnvBool(const char *name, bool defval) { [[maybe_unused]] static bool GetEnvBool(const char *name, bool defval) {
const char *env = std::getenv(name); const char *env = std::getenv(name);
if (env == nullptr) { if (env == nullptr) {
return defval; return defval;