Fix functionalize_aten_op calls for symint ops (#1459)

* Fix functionalize_aten_op calls for symint ops

* Update PyTorch version
Jae Hoon (Antonio) Kim 2022-10-05 10:23:48 -04:00 committed by GitHub
parent faa9a78e38
commit c57d801260
3 changed files with 5 additions and 5 deletions
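
The three C++ hunks below apply the same one-line fix. Each of these
LazyNativeFunctions overrides takes c10::SymInt arguments, so it must be
functionalized through functionalize_aten_op_symint, which dispatches the
SymInt-preserving overload of the underlying ATen op; the plain
functionalize_aten_op helper routes to the int64_t-based signature and loses
the symbolic shape information. The accompanying nightly and commit-hash bumps
presumably pull in a PyTorch build that ships the _symint helper. A minimal
sketch of the corrected pattern, assuming the helper templates live in
ATen/FunctionalTensorWrapper.h and the ATEN_OP macro in ATen/Operators.h
(their usual locations in PyTorch nightlies of this vintage); the wrapper
name below is illustrative, not from this repo:

#include <ATen/FunctionalTensorWrapper.h> // functionalize_aten_op{,_symint} (assumed location)
#include <ATen/Operators.h>               // ATEN_OP macro (assumed location)

// Illustrative free function (hypothetical name): a *_symint override that
// re-dispatches narrow_copy through the functionalization pass.
at::Tensor my_narrow_copy_symint(
    const at::Tensor& self,
    int64_t dim,
    c10::SymInt start,
    c10::SymInt length) {
  // Wrong (pre-fix): the non-symint helper would select the int64_t
  // overload, forcing the SymInts down to concrete integers:
  //   return at::functionalization::functionalize_aten_op<ATEN_OP(
  //       narrow_copy)>::call(self, dim, start, length);
  //
  // Right (post-fix): the _symint helper keeps the c10::SymInt arguments
  // intact through functionalization:
  return at::functionalization::functionalize_aten_op_symint<ATEN_OP(
      narrow_copy)>::call(self, dim, start, length);
}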


@@ -391,7 +391,7 @@ at::Tensor LazyNativeFunctions::new_empty_strided_symint(
     c10::optional<at::Device> device,
     c10::optional<bool> pin_memory) {
   return at::functionalization::
-      functionalize_aten_op<ATEN_OP(new_empty_strided)>::call(
+      functionalize_aten_op_symint<ATEN_OP(new_empty_strided)>::call(
           self, size, stride, dtype, layout, device, pin_memory);
 }
@@ -400,7 +400,7 @@ at::Tensor LazyNativeFunctions::narrow_copy_symint(
     int64_t dim,
     c10::SymInt start,
     c10::SymInt length) {
-  return at::functionalization::functionalize_aten_op<ATEN_OP(
+  return at::functionalization::functionalize_aten_op_symint<ATEN_OP(
       narrow_copy)>::call(self, dim, start, length);
 }
 at::Tensor LazyNativeFunctions::pixel_shuffle(
@@ -426,7 +426,7 @@ at::Tensor LazyNativeFunctions::slice_backward_symint(
     c10::SymInt start,
     c10::SymInt end,
     c10::SymInt step) {
-  return at::functionalization::functionalize_aten_op<ATEN_OP(
+  return at::functionalization::functionalize_aten_op_symint<ATEN_OP(
       slice_backward)>::call(grad_output, input_sizes, dim, start, end, step);
 }
 at::Tensor LazyNativeFunctions::diagonal_backward(


@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
 --pre
-torch==1.13.0.dev20221003
+torch==1.13.0.dev20221004


@@ -1 +1 @@
-57d0543a3fbfb27d1b365d3515e2a2ba86b44878
+9f3d8fec5747fde5191618eb895fbec2d50edf93