Fix LTC build warnings (#1272)

* Resolve -Wunused-variable warnings

* Fix -Wunneeded-internal-declaration warnings

* Address review comment

* Update autogen_ltc_backend.py

* Update mlir_native_functions.cpp to work with updated PyTorch

* Remove NewZeros from LTC XFAIL set
Henry Tu 2022-08-24 15:04:28 -04:00 committed by GitHub
parent f012279fa2
commit e2f862cb85
3 changed files with 26 additions and 50 deletions
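
For reference, a minimal standalone illustration of the two Clang diagnostics this commit silences (not code from this repository; the names are hypothetical):

// warnings_repro.cpp : compile with `clang++ -Wall -c warnings_repro.cpp`
#include <cstddef>

namespace {
// Internal-linkage function named only in an unevaluated context below,
// so Clang reports "function 'Helper' is not needed and will not be
// emitted" [-Wunneeded-internal-declaration].
int Helper() { return 0; }
} // namespace

std::size_t Generated() {
  std::size_t i = 0;        // declared but never read: [-Wunused-variable]
  return sizeof(Helper());  // unevaluated use only, no emission needed
}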

build_tools/autogen_ltc_backend.py

@@ -74,6 +74,9 @@ class GenMlirLazyIr(torchgen.dest.GenLazyIR):
for a in emplace_kwarg_values + emplace_kwarg_scalars
)
+ # Only create this variable if it's used to avoid Wunused-variable
+ operand_idx_counter = "size_t i = 0;" if "i++" in (emplace_arguments_str + emplace_kwarguments) else ""
return reindent(
f"""
{signature} {{
@@ -82,7 +85,7 @@ class GenMlirLazyIr(torchgen.dest.GenLazyIR):
std::vector<torch::jit::NamedValue> kwarguments;
arguments.reserve({len(emplace_arguments)});
kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
- size_t i = 0;
+ {operand_idx_counter}
{emplace_arguments_str}
{emplace_kwarguments}
torch::lazy::TorchMlirOpVector {schema.aten_name}_out = torch::lazy::LowerTorchMlirBuiltin(function, op().op, shapes(), arguments, kwarguments);
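
With this change, the generated Lower() body declares the operand counter only when at least one operand(i++) access follows. For an op whose arguments are all scalar keyword values, the emitted C++ now looks roughly like this (a sketch based on the template above; the op and attribute names are hypothetical):

// Hypothetical generated lowering: {operand_idx_counter} expanded to the
// empty string, so no unused `size_t i` is declared.
torch::lazy::TorchMlirOpVector SomeScalarOp::Lower(
    TorchMlirFunction function, TorchMlirLoweringContext* loctx) const {
  std::vector<torch::jit::NamedValue> arguments;
  std::vector<torch::jit::NamedValue> kwarguments;
  arguments.reserve(0);
  kwarguments.reserve(1);
  kwarguments.emplace_back("alpha", alpha);  // scalar attribute, no operand(i++)
  torch::lazy::TorchMlirOpVector some_scalar_op_out =
      torch::lazy::LowerTorchMlirBuiltin(
          function, op().op, shapes(), arguments, kwarguments);
  return some_scalar_op_out;
}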

e2e_testing/torchscript/xfail_sets.py

@@ -435,12 +435,6 @@ LTC_XFAIL_SET = {
"NewOnesModuleFloat3D_basic",
"NewOnesModuleInt2D_basic",
"NewOnesModuleInt3D_basic",
"NewZerosModuleDefaultDtype_basic",
"NewZerosModuleFalsePinMemory_basic",
"NewZerosModuleFloat2D_basic",
"NewZerosModuleFloat3D_basic",
"NewZerosModuleInt2D_basic",
"NewZerosModuleInt3D_basic",
"OnesLikeModule_defaultDtype",
"OnesLikeModule_falsePinMemory",
"OnesLikeModule_float",

python/torch_mlir/csrc/base_lazy_backend/mlir_native_functions.cpp

@@ -39,42 +39,6 @@ namespace lazy {
namespace {
- std::pair<torch::lazy::LazyTensorPtr, torch::lazy::LazyTensorPtr>
- GetBinaryOperands(const at::Tensor& self, const at::Tensor& other) {
- torch::lazy::LazyTensorPtr self_tensor;
- torch::lazy::LazyTensorPtr other_tensor;
- auto self_xtensor = torch::lazy::TryGetLtcTensor(self);
- if (!self_xtensor) {
- other_tensor = torch::lazy::TryGetLtcTensor(other);
- self_tensor = GetOrCreateLtcTensor(self, other_tensor->GetDevice());
- } else {
- self_tensor = self_xtensor;
- other_tensor = GetOrCreateLtcTensor(other, self_tensor->GetDevice());
- }
- return std::pair<torch::lazy::LazyTensorPtr, torch::lazy::LazyTensorPtr>(
- self_tensor, other_tensor);
- }
- template <typename B>
- at::Tensor
- DoBinaryOp(const at::Tensor& self, const at::Tensor& other, const B& bin_op) {
- at::ScalarType dtype = at::result_type(self, other);
- std::pair<torch::lazy::LazyTensorPtr, torch::lazy::LazyTensorPtr> operands =
- GetBinaryOperands(
- torch::lazy::UnwrapNumber(self, dtype),
- torch::lazy::UnwrapNumber(other, dtype));
- torch::lazy::LazyTensorPtr result = bin_op(operands.first, operands.second);
- return torch::lazy::CreateAtenFromLtcTensor(result);
- }
- template <typename B>
- at::Tensor
- DoBinaryOp(const at::Tensor& self, const at::Scalar& other, const B& bin_op) {
- torch::lazy::LazyTensorPtr self_tensor = torch::lazy::GetLtcTensor(self);
- torch::lazy::LazyTensorPtr result = bin_op(self_tensor, other);
- return torch::lazy::CreateAtenFromLtcTensor(result);
- }
at::Tensor CreateLtcTensor(
const at::Tensor& tensor,
const c10::optional<torch::lazy::BackendDevice>& device) {
@@ -338,10 +302,14 @@ at::Tensor LazyNativeFunctions::_to_copy(
};
at::Tensor LazyNativeFunctions::empty(
- at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
- c10::optional<at::Layout> layout, c10::optional<at::Device> device,
+ at::SymIntArrayRef sym_size,
+ c10::optional<at::ScalarType> dtype,
+ c10::optional<at::Layout> layout,
+ c10::optional<at::Device> device,
c10::optional<bool> pin_memory,
c10::optional<at::MemoryFormat> memory_format) {
+ // TODO: support this directly
+ auto size = c10::asIntArrayRefSlow(sym_size);
const auto device_type = torch::lazy::getBackend()->EagerFallbackDeviceType();
at::TensorOptions options = at::TensorOptions()
.device(c10::Device(device_type))
@@ -353,8 +321,9 @@ at::Tensor LazyNativeFunctions::empty(
// See Note [Lazy Tensor Functionalization]
if (c10::impl::tls_local_dispatch_key_set().excluded_.has(
c10::DispatchKey::Functionalize)) {
- // Invariant: if the functionalization key is in the exclude set, then we're expected
- // to return an ordinary tensor, which will be "lifted" into a functional wrapper later.
+ // Invariant: if the functionalization key is in the exclude set, then we're
+ // expected to return an ordinary tensor, which will be "lifted" into a
+ // functional wrapper later.
return tensor;
} else {
auto wrapped = at::functionalization::impl::to_functional_tensor(tensor);
@@ -367,7 +336,13 @@ at::Tensor LazyNativeFunctions::empty_strided(
c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
TORCH_LAZY_FN_COUNTER("lazy::");
- at::Tensor t = empty(size, dtype, layout, device, pin_memory, c10::nullopt);
+ at::Tensor t = empty(
+ c10::SymIntArrayRef::fromIntArrayRef(size),
+ dtype,
+ layout,
+ device,
+ pin_memory,
+ c10::nullopt);
return t.as_strided(size, stride, /*storage_offset=*/0);
}
@@ -386,7 +361,8 @@ LazyNativeFunctions::fill_(at::Tensor& self, const at::Scalar& value) {
at::Tensor LazyNativeFunctions::_unsafe_view(
const at::Tensor& self, at::IntArrayRef size) {
TORCH_LAZY_FN_COUNTER("lazy::");
- return LazyNativeFunctions::view_copy(self, size);
+ return LazyNativeFunctions::view_copy(
+ self, c10::SymIntArrayRef::fromIntArrayRef(size));
}
// This is needed by the torch.tensor constructor.
@@ -422,7 +398,10 @@ at::Tensor LazyNativeFunctions::new_empty_strided(
}
at::Tensor LazyNativeFunctions::narrow_copy(
- const at::Tensor& self, int64_t dim, int64_t start, int64_t length) {
+ const at::Tensor& self,
+ int64_t dim,
+ c10::SymInt start,
+ c10::SymInt length) {
return at::functionalization::functionalize_aten_op<ATEN_OP(
narrow_copy)>::call(self, dim, start, length);
}
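
The signature changes above track PyTorch's migration of size and index arguments to c10::SymInt. The bridging pattern visible throughout this file: wrap concrete sizes with c10::SymIntArrayRef::fromIntArrayRef when calling a SymInt overload, and recover concrete integers with c10::asIntArrayRefSlow until symbolic shapes are supported directly. A sketch of that pattern, assuming the usual ATen headers; the helper itself is illustrative and not part of the patch:

// Illustrative only: a concrete-size caller reaching a SymInt-based
// overload, mirroring empty_strided() and _unsafe_view() above.
at::Tensor CallWithConcreteSizes(const at::Tensor& self, at::IntArrayRef size) {
  // Concrete ints -> symbolic ints for the new-style signature.
  auto sym_size = c10::SymIntArrayRef::fromIntArrayRef(size);
  // Symbolic ints -> concrete ints (the "slow" checked path), pending
  // direct SymInt support (see the TODO in empty() above).
  auto concrete_size = c10::asIntArrayRefSlow(sym_size);
  return at::empty(concrete_size, self.options());
}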