From e2f862cb8538846d205ac0a837faddb50d65174f Mon Sep 17 00:00:00 2001
From: Henry Tu
Date: Wed, 24 Aug 2022 15:04:28 -0400
Subject: [PATCH] Fix LTC build warnings (#1272)

* Resolved Wunused-variable
* Fix Wunneeded-internal-declaration
* Address review comment
* Update autogen_ltc_backend.py
* Update mlir_native_functions.cpp to work with updated PyTorch
* Remove NewZeros from LTC XFAIL set
---
 build_tools/autogen_ltc_backend.py    |  5 +-
 e2e_testing/torchscript/xfail_sets.py |  6 --
 .../mlir_native_functions.cpp         | 65 +++++++------------
 3 files changed, 26 insertions(+), 50 deletions(-)

diff --git a/build_tools/autogen_ltc_backend.py b/build_tools/autogen_ltc_backend.py
index ff31b95b4..8012a57be 100644
--- a/build_tools/autogen_ltc_backend.py
+++ b/build_tools/autogen_ltc_backend.py
@@ -74,6 +74,9 @@ class GenMlirLazyIr(torchgen.dest.GenLazyIR):
 for a in emplace_kwarg_values + emplace_kwarg_scalars
 )

+ # Only create this variable if it's used to avoid Wunused-variable
+ operand_idx_counter = "size_t i = 0;" if "i++" in (emplace_arguments_str + emplace_kwarguments) else ""
+
 return reindent(
 f"""
 {signature} {{
@@ -82,7 +85,7 @@ class GenMlirLazyIr(torchgen.dest.GenLazyIR):
 std::vector<torch::jit::NamedValue> kwarguments;
 arguments.reserve({len(emplace_arguments)});
 kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
- size_t i = 0;
+ {operand_idx_counter}
 {emplace_arguments_str}
 {emplace_kwarguments}
 torch::lazy::TorchMlirOpVector {schema.aten_name}_out = torch::lazy::LowerTorchMlirBuiltin(function, op().op, shapes(), arguments, kwarguments);
diff --git a/e2e_testing/torchscript/xfail_sets.py b/e2e_testing/torchscript/xfail_sets.py
index 8904d7faa..f106c9749 100644
--- a/e2e_testing/torchscript/xfail_sets.py
+++ b/e2e_testing/torchscript/xfail_sets.py
@@ -435,12 +435,6 @@ LTC_XFAIL_SET = {
 "NewOnesModuleFloat3D_basic",
 "NewOnesModuleInt2D_basic",
 "NewOnesModuleInt3D_basic",
- "NewZerosModuleDefaultDtype_basic",
- "NewZerosModuleFalsePinMemory_basic",
- "NewZerosModuleFloat2D_basic",
- "NewZerosModuleFloat3D_basic",
- "NewZerosModuleInt2D_basic",
- "NewZerosModuleInt3D_basic",
 "OnesLikeModule_defaultDtype",
 "OnesLikeModule_falsePinMemory",
 "OnesLikeModule_float",
diff --git a/python/torch_mlir/csrc/base_lazy_backend/mlir_native_functions.cpp b/python/torch_mlir/csrc/base_lazy_backend/mlir_native_functions.cpp
index e197af3e5..cfd0cf68e 100644
--- a/python/torch_mlir/csrc/base_lazy_backend/mlir_native_functions.cpp
+++ b/python/torch_mlir/csrc/base_lazy_backend/mlir_native_functions.cpp
@@ -39,42 +39,6 @@ namespace lazy {

 namespace {

-std::pair<torch::lazy::LazyTensorPtr, torch::lazy::LazyTensorPtr>
-GetBinaryOperands(const at::Tensor& self, const at::Tensor& other) {
- torch::lazy::LazyTensorPtr self_tensor;
- torch::lazy::LazyTensorPtr other_tensor;
- auto self_xtensor = torch::lazy::TryGetLtcTensor(self);
- if (!self_xtensor) {
- other_tensor = torch::lazy::TryGetLtcTensor(other);
- self_tensor = GetOrCreateLtcTensor(self, other_tensor->GetDevice());
- } else {
- self_tensor = self_xtensor;
- other_tensor = GetOrCreateLtcTensor(other, self_tensor->GetDevice());
- }
- return std::pair<torch::lazy::LazyTensorPtr, torch::lazy::LazyTensorPtr>(
- self_tensor, other_tensor);
-}
-
-template <typename B>
-at::Tensor
-DoBinaryOp(const at::Tensor& self, const at::Tensor& other, const B& bin_op) {
- at::ScalarType dtype = at::result_type(self, other);
- std::pair<torch::lazy::LazyTensorPtr, torch::lazy::LazyTensorPtr> operands =
- GetBinaryOperands(
- torch::lazy::UnwrapNumber(self, dtype),
- torch::lazy::UnwrapNumber(other, dtype));
- torch::lazy::LazyTensorPtr result = bin_op(operands.first, operands.second);
- return torch::lazy::CreateAtenFromLtcTensor(result);
-}
-
-template <typename B>
-at::Tensor
-DoBinaryOp(const at::Tensor& self, const at::Scalar& other, const B& bin_op) {
- torch::lazy::LazyTensorPtr self_tensor = torch::lazy::GetLtcTensor(self);
- torch::lazy::LazyTensorPtr result = bin_op(self_tensor, other);
- return torch::lazy::CreateAtenFromLtcTensor(result);
-}
-
 at::Tensor CreateLtcTensor(
 const at::Tensor& tensor,
 const c10::optional<torch::lazy::BackendDevice>& device) {
@@ -338,10 +302,14 @@ at::Tensor LazyNativeFunctions::_to_copy(
 };

 at::Tensor LazyNativeFunctions::empty(
- at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
- c10::optional<at::Layout> layout, c10::optional<at::Device> device,
+ at::SymIntArrayRef sym_size,
+ c10::optional<at::ScalarType> dtype,
+ c10::optional<at::Layout> layout,
+ c10::optional<at::Device> device,
 c10::optional<bool> pin_memory,
 c10::optional<at::MemoryFormat> memory_format) {
+ // TODO: support this directly
+ auto size = c10::asIntArrayRefSlow(sym_size);
 const auto device_type = torch::lazy::getBackend()->EagerFallbackDeviceType();
 at::TensorOptions options = at::TensorOptions()
 .device(c10::Device(device_type))
@@ -353,8 +321,9 @@ at::Tensor LazyNativeFunctions::empty(
 // See Note [Lazy Tensor Functionalization]
 if (c10::impl::tls_local_dispatch_key_set().excluded_.has(
 c10::DispatchKey::Functionalize)) {
- // Invariant: if the functionalization key is in the exclude set, then we're expected
- // to return an ordinary tensor, which will be "lifted" into a functional wrapper later.
+ // Invariant: if the functionalization key is in the exclude set, then we're
+ // expected to return an ordinary tensor, which will be "lifted" into a
+ // functional wrapper later.
 return tensor;
 } else {
 auto wrapped = at::functionalization::impl::to_functional_tensor(tensor);
@@ -367,7 +336,13 @@ at::Tensor LazyNativeFunctions::empty_strided(
 c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
 c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
 TORCH_LAZY_FN_COUNTER("lazy::");
- at::Tensor t = empty(size, dtype, layout, device, pin_memory, c10::nullopt);
+ at::Tensor t = empty(
+ c10::SymIntArrayRef::fromIntArrayRef(size),
+ dtype,
+ layout,
+ device,
+ pin_memory,
+ c10::nullopt);
 return t.as_strided(size, stride, /*storage_offset=*/0);
 }

@@ -386,7 +361,8 @@ LazyNativeFunctions::fill_(at::Tensor& self, const at::Scalar& value) {
 at::Tensor LazyNativeFunctions::_unsafe_view(
 const at::Tensor& self, at::IntArrayRef size) {
 TORCH_LAZY_FN_COUNTER("lazy::");
- return LazyNativeFunctions::view_copy(self, size);
+ return LazyNativeFunctions::view_copy(
+ self, c10::SymIntArrayRef::fromIntArrayRef(size));
 }

 // This is needed by the torch.tensor constructor.
@@ -422,7 +398,10 @@ at::Tensor LazyNativeFunctions::new_empty_strided(
 }

 at::Tensor LazyNativeFunctions::narrow_copy(
- const at::Tensor& self, int64_t dim, int64_t start, int64_t length) {
+ const at::Tensor& self,
+ int64_t dim,
+ c10::SymInt start,
+ c10::SymInt length) {
 return at::functionalization::functionalize_aten_op<ATEN_OP(narrow_copy)>::call(self, dim, start, length);
 }
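
For reference, the lowering body emitted by GenMlirLazyIr after this change looks roughly like the sketch below. The class name Abs, the operand counts, and the exact Lower() signature are illustrative assumptions rather than verbatim generator output; the point is only that `size_t i = 0;` is now emitted solely when some emplace line consumes it via `i++`, so ops with no operands no longer declare an unused counter.

    // Sketch of a generated Lower() body for a hypothetical one-operand op.
    // Because an emplace line below uses i++, the counter is emitted.
    torch::lazy::TorchMlirOpVector Abs::Lower(
        TorchMlirFunction function,
        torch::lazy::TorchMlirLoweringContext* loctx) const {
      std::vector<torch::jit::NamedValue> arguments;
      std::vector<torch::jit::NamedValue> kwarguments;
      arguments.reserve(1);
      kwarguments.reserve(0);
      size_t i = 0;  // omitted by the generator when no emplace line uses i++
      arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
      torch::lazy::TorchMlirOpVector abs_out = torch::lazy::LowerTorchMlirBuiltin(
          function, op().op, shapes(), arguments, kwarguments);
      return abs_out;
    }

Testing for the literal "i++" in the emitted argument/kwargument strings is a purely textual check, but it matches the only way the counter is consumed in the template, so the declaration can no longer appear without a use.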