mirror of https://github.com/llvm/torch-mlir
Fix symint related functionalization ops (#1289)

* Fix symint related functionalization ops
* Remove zeros xfail from LTC tests

parent 0e3ddbac91
commit 8e880a2d00
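Taken together, the hunks below do three things: they teach the LTC code generator (`GenTorchMlirLTC`) to locate headers via a separate `TORCH_INCLUDE_DIR` and to tolerate a missing `ts_native_functions.yaml`; they move `LazyNativeFunctions::empty` to the new `SymIntArrayRef` signature and adapt its `IntArrayRef` callers; and they retire the hand-written `narrow_copy` and `linalg_inv_ex` shims, blacklist `zeros`, and drop the `NewZeros*` xfails that now pass.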
```diff
@@ -24,8 +24,9 @@ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
 from torchgen.gen_backend_stubs import parse_backend_yaml
 
 TORCH_DIR = Path(importlib.util.find_spec("torch").origin).resolve().parent.parent
-if TORCH_DIR.joinpath("torch", "include").is_dir():
-    TORCH_DIR = TORCH_DIR.joinpath("torch", "include")
+TORCH_INCLUDE_DIR = TORCH_DIR.joinpath("torch", "include")
+if not TORCH_INCLUDE_DIR.is_dir():
+    TORCH_INCLUDE_DIR = TORCH_DIR
 TORCHGEN_DIR = Path(torchgen.__path__[0]).resolve()
 TORCH_MLIR_DIR = Path(__file__).resolve().parent.parent
```
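Before this change, `TORCH_DIR` itself was rebound to `torch/include` whenever that directory existed, so files living outside the include tree became unreachable. The new code leaves `TORCH_DIR` alone and derives a separate `TORCH_INCLUDE_DIR` (falling back to `TORCH_DIR` for checkouts where headers are not under `torch/include`), which the shape-inference hunk further down uses to read `shape_inference.h`.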
```diff
@@ -167,6 +168,9 @@ class GenTorchMlirLTC:
+        ts_native_yaml = None
+        if ts_native_yaml_path.exists():
+            ts_native_yaml = yaml.load(ts_native_yaml_path.read_text(), yaml.CLoader)
+        else:
+            logging.warning(f"Could not find `ts_native_functions.yaml` at {ts_native_yaml_path}")
 
         parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
         self.native_functions = parsed_yaml.native_functions
```
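Loading the TorchScript backend's `ts_native_functions.yaml` is now best-effort: if the file is absent from the installed torch package, codegen logs a warning and continues with `ts_native_yaml = None` rather than failing outright.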
```diff
@@ -290,6 +294,7 @@ class GenTorchMlirLTC:
 
+        if ts_native_yaml:
             ts_full_codegen = set(ts_native_yaml["full_codegen"])
             ts_supported = set(ts_native_yaml["supported"])
             mlir_full_codegen = set(self.ops)
 
             if ts_full_codegen - mlir_full_codegen:
```
```diff
@@ -308,6 +313,22 @@ class GenTorchMlirLTC:
                 )
             )
 
+            if ts_supported - supported:
+                logging.debug(
+                    "Ops supported by the TorchScript backend "
+                    "but not by the Torch-MLIR backend:\n {}".format(
+                        "\n ".join(sorted(ts_supported - supported))
+                    )
+                )
+
+            if supported - ts_supported:
+                logging.debug(
+                    "Ops supported by the Torch-MLIR backend "
+                    "but not by the TorchScript backend:\n {}".format(
+                        "\n ".join(sorted(supported - ts_supported))
+                    )
+                )
 
     def generate_shape_inference(self):
         parsed_backend_yaml = parse_backend_yaml(
             self.source_yaml,
```
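These two `logging.debug` reports (guarded by the `if ts_native_yaml:` check added above) diff the op coverage of the two backends in both directions, so drift between `ts_native_functions.yaml` and the Torch-MLIR backend surfaces during codegen rather than at runtime.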
```diff
@@ -367,7 +388,7 @@ class GenTorchMlirLTC:
         )
         assert len(shape_inference_decls) > 0
         upstream_shape_inference_decls = extract_signatures(
-            TORCH_DIR.joinpath(
+            TORCH_INCLUDE_DIR.joinpath(
                 "torch", "csrc", "lazy", "core", "shape_inference.h"
             ).read_text()
         )
```
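This is the consumer the `TORCH_INCLUDE_DIR` split exists for: `shape_inference.h` is read from the include tree, while `TORCH_DIR` keeps pointing at the package root.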
```diff
@@ -19,6 +19,7 @@ blacklist:
 - new_empty
 - rsub
 - slice.Tensor # Disabled in favour of slice_copy.Tensor
+- zeros
 
 # Disabled in favour of functionalized alternatives
 - _reshape_alias
```
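`zeros` is newly blacklisted from LTC codegen; this pairs with the `NewZeros*` xfail removals further down and the "Remove zeros xfail from LTC tests" bullet in the commit message.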
```diff
@@ -59,14 +60,12 @@ supported:
 # but their implementations call view operators (which we need to functionalize away).
 - block_diag
 - new_empty_strided
-- narrow_copy
 - pixel_shuffle
 - pixel_unshuffle
 - select_backward
 - slice_backward
 - diagonal_backward
 - _trilinear
-- linalg_inv_ex
 - linalg_pinv.atol_rtol_tensor
 - logsumexp.out
 
```
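`narrow_copy` and `linalg_inv_ex` leave the hand-written `supported` list; their C++ shims are deleted in the hunks below. Presumably both are now served by the generated, symint-aware path instead of the functionalization fallback.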
```diff
@@ -435,12 +435,6 @@ LTC_XFAIL_SET = {
     "NewOnesModuleFloat3D_basic",
     "NewOnesModuleInt2D_basic",
     "NewOnesModuleInt3D_basic",
-    "NewZerosModuleDefaultDtype_basic",
-    "NewZerosModuleFalsePinMemory_basic",
-    "NewZerosModuleFloat2D_basic",
-    "NewZerosModuleFloat3D_basic",
-    "NewZerosModuleInt2D_basic",
-    "NewZerosModuleInt3D_basic",
     "OnesLikeModule_defaultDtype",
     "OnesLikeModule_falsePinMemory",
     "OnesLikeModule_float",
```
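All six `NewZeros*` e2e tests come off `LTC_XFAIL_SET`, matching the second bullet of the commit message: with `zeros` blacklisted from codegen, these now pass under LTC.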
```diff
@@ -302,10 +302,12 @@ at::Tensor LazyNativeFunctions::_to_copy(
 };
 
 at::Tensor LazyNativeFunctions::empty(
-    at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
+    at::SymIntArrayRef sym_size, c10::optional<at::ScalarType> dtype,
     c10::optional<at::Layout> layout, c10::optional<at::Device> device,
     c10::optional<bool> pin_memory,
     c10::optional<at::MemoryFormat> memory_format) {
+  // TODO: support this directly
+  auto size = c10::asIntArrayRefSlow(sym_size);
   const auto device_type = torch::lazy::getBackend()->EagerFallbackDeviceType();
   at::TensorOptions options = at::TensorOptions()
                                   .device(c10::Device(device_type))
```
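The core of the fix: upstream `aten::empty` now carries a `SymInt[] size` argument, so the lazy kernel's signature must take `at::SymIntArrayRef`. Since this backend does not yet reason about symbolic ints, it immediately lowers back to concrete integers with `c10::asIntArrayRefSlow` (hence the TODO).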
```diff
@@ -331,7 +333,9 @@ at::Tensor LazyNativeFunctions::empty_strided(
     c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
     c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
   TORCH_LAZY_FN_COUNTER("lazy::");
-  at::Tensor t = empty(size, dtype, layout, device, pin_memory, c10::nullopt);
+  at::Tensor t = empty(
+      c10::SymIntArrayRef::fromIntArrayRef(size),
+      dtype, layout, device, pin_memory, c10::nullopt);
   return t.as_strided(size, stride, /*storage_offset=*/0);
 }
```
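Both conversion helpers now appear in the file: `c10::asIntArrayRefSlow` goes from symbolic to concrete sizes, and `c10::SymIntArrayRef::fromIntArrayRef` wraps concrete sizes for a SymInt signature. A minimal standalone sketch of that round-trip, assuming a PyTorch 1.12/1.13-era development install (the `numel_of` helper is hypothetical, for illustration only):

```cpp
// Round-trip between concrete and symbolic size arrays, as used in
// the diffs above. Header path follows the c10 layout of this era.
#include <c10/core/SymIntArrayRef.h>

#include <cstdint>
#include <vector>

// Hypothetical helper with a SymInt-style signature, mirroring the
// updated LazyNativeFunctions::empty.
int64_t numel_of(c10::SymIntArrayRef sym_size) {
  // asIntArrayRefSlow materializes plain int64_t sizes; the "slow"
  // suffix marks it as the escape hatch for backends that do not
  // handle symbolic shapes natively yet.
  c10::IntArrayRef size = c10::asIntArrayRefSlow(sym_size);
  int64_t n = 1;
  for (int64_t d : size) {
    n *= d;
  }
  return n;
}

int main() {
  std::vector<int64_t> dims = {2, 3, 4};
  // fromIntArrayRef wraps known-concrete sizes so that an
  // IntArrayRef caller (like empty_strided above) can reach the
  // SymInt signature.
  c10::SymIntArrayRef sym =
      c10::SymIntArrayRef::fromIntArrayRef(c10::IntArrayRef(dims));
  return numel_of(sym) == 2 * 3 * 4 ? 0 : 1;
}
```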
```diff
@@ -350,7 +354,7 @@ LazyNativeFunctions::fill_(at::Tensor& self, const at::Scalar& value) {
 at::Tensor LazyNativeFunctions::_unsafe_view(
     const at::Tensor& self, at::IntArrayRef size) {
   TORCH_LAZY_FN_COUNTER("lazy::");
-  return LazyNativeFunctions::view_copy(self, size);
+  return LazyNativeFunctions::view_copy(self, c10::SymIntArrayRef::fromIntArrayRef(size));
 }
 
 // This is needed by the torch.tensor constructor.
```
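Same pattern as `empty_strided`: `_unsafe_view` still receives a concrete `at::IntArrayRef`, but the `view_copy` it forwards to now expects symbolic sizes, so the argument is wrapped with `fromIntArrayRef`.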
```diff
@@ -385,11 +389,6 @@ at::Tensor LazyNativeFunctions::new_empty_strided(
       self, size, stride, dtype, layout, device, pin_memory);
 }
 
-at::Tensor LazyNativeFunctions::narrow_copy(
-    const at::Tensor& self, int64_t dim, int64_t start, int64_t length) {
-  return at::functionalization::functionalize_aten_op<ATEN_OP(
-      narrow_copy)>::call(self, dim, start, length);
-}
 at::Tensor LazyNativeFunctions::pixel_shuffle(
     const at::Tensor& self, int64_t upscale_factor) {
   return at::functionalization::functionalize_aten_op<ATEN_OP(
```
```diff
@@ -425,11 +424,6 @@ at::Tensor LazyNativeFunctions::_trilinear(
   return at::functionalization::functionalize_aten_op<ATEN_OP(_trilinear)>::
       call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
 }
-::std::tuple<at::Tensor, at::Tensor>
-LazyNativeFunctions::linalg_inv_ex(const at::Tensor& self, bool check_errors) {
-  return at::functionalization::functionalize_aten_op<ATEN_OP(
-      linalg_inv_ex)>::call(self, check_errors);
-}
 at::Tensor LazyNativeFunctions::linalg_pinv(
     const at::Tensor& self, const c10::optional<at::Tensor>& atol,
     const c10::optional<at::Tensor>& rtol, bool hermitian) {
```
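These deletions mirror the YAML change above: with `narrow_copy` and `linalg_inv_ex` out of the `supported` list, their `functionalize_aten_op` shims are no longer referenced and are removed wholesale.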