build: Update Roll PyTorch version (#3548)

This commit also updates the PyTorch and Torchvision nightly links since
they are now moved to a different location.

PyTorch Nightly: https://download.pytorch.org/whl/nightly/cpu/torch/
Torchvision Nightly:
https://download.pytorch.org/whl/nightly/cpu/torchvision/

Disables dtype checks for some ops, tracked by https://github.com/llvm/torch-mlir/issues/3552

Signed-Off By: Vivek Khandelwal <vivekkhandelwal1424@gmail.com>
pull/3554/head
Vivek Khandelwal 2024-07-19 21:38:57 +05:30 committed by GitHub
parent 2cdf3deae3
commit 22c9008bb9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 49 additions and 46 deletions

View File

@ -53,19 +53,19 @@ jobs:
sudo apt-get install unzip
# Fetch the most recent nightly torchvision release
VISION_RELEASE=$(python -m pip index versions -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html --pre torchvision | grep "Available versions" | tr ' ' '\n' | grep "^[0-9]" | sort --version-sort --reverse | head -n1 | tr -d ',' | sed 's/\([^+]*\).*/\1/')
VISION_RELEASE=$(python -m pip index versions -f https://download.pytorch.org/whl/nightly/cpu/torchvision/ --pre torchvision | grep "Available versions" | tr ' ' '\n' | grep "^[0-9]" | sort --version-sort --reverse | head -n1 | tr -d ',' | sed 's/\([^+]*\).*/\1/')
echo "Found torchvision release ${VISION_RELEASE}"
# Fetch the whl file associated with the nightly torchvision release
rm -f torch*.whl
python -m pip download -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html --pre "torchvision==${VISION_RELEASE}"
python -m pip download -f https://download.pytorch.org/whl/nightly/cpu/torchvision/ --pre "torchvision==${VISION_RELEASE}"
# Downloading the torchvision WHL also downloads the PyTorch WHL file
# Read the version from the downloaded whl file without extracting it
PT_RELEASE=$(unzip -p torch-*.whl 'torch-*/METADATA' | grep "^Version:" | awk '{ print $2 }' | sed 's/\([^+]*\).*/\1/')
echo "Found torch release ${PT_RELEASE}"
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html\n--pre\ntorch==%s\n" "${PT_RELEASE}" > pytorch-requirements.txt
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html\n--pre\ntorchvision==%s\n" "${VISION_RELEASE}" > torchvision-requirements.txt
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torch\n--pre\ntorch==%s\n" "${PT_RELEASE}" > pytorch-requirements.txt
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torchvision\n--pre\ntorchvision==%s\n" "${VISION_RELEASE}" > torchvision-requirements.txt
# Read the commit hash from the downloaded whl file without extracting it
PT_HASH=$(unzip -p torch-"${PT_RELEASE}"*.whl torch/version.py | grep git_version | tail -1 | awk '{ print $3 }' | tr -d "'")

View File

@ -439,11 +439,11 @@ function build_torch_mlir() {
nightly)
echo ":::: Using nightly dependencies"
python -m pip install --no-cache-dir -r /main_checkout/torch-mlir/requirements.txt \
--extra-index-url https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
--extra-index-url https://download.pytorch.org/whl/nightly/cpu/torch/
CMAKE_GENERATOR=Ninja \
TORCH_MLIR_PYTHON_PACKAGE_VERSION=${TORCH_MLIR_PYTHON_PACKAGE_VERSION} \
python -m pip wheel -v --no-build-isolation -w /wheelhouse /main_checkout/torch-mlir \
-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html \
-f https://download.pytorch.org/whl/nightly/cpu/torch/ \
-r /main_checkout/torch-mlir/whl-requirements.txt
;;
stable)

View File

@ -21,7 +21,7 @@ Write-Host "Build Deps installation completed successfully"
Write-Host "Building torch-mlir"
$env:CMAKE_GENERATOR='Ninja'
$env:TORCH_MLIR_ENABLE_LTC='0'
python -m pip wheel -v -w wheelhouse ./ -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html -r whl-requirements.txt
python -m pip wheel -v -w wheelhouse ./ -f https://download.pytorch.org/whl/nightly/cpu/torch/ -r whl-requirements.txt
Write-Host "Build completed successfully"

View File

@ -11107,6 +11107,7 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" return %0#1 : !torch.int\n"
" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten._weight_norm_interface\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.int) -> !torch.tuple<int, int> {\n"
" %int15 = torch.constant.int 15\n"
" %int6 = torch.constant.int 6\n"
" %int9 = torch.constant.int 9\n"
" %int7 = torch.constant.int 7\n"
@ -11143,7 +11144,14 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %11 = torch.prim.TupleConstruct %1#1, %int6 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
" torch.prim.If.yield %true, %11 : !torch.bool, !torch.tuple<int, int>\n"
" } else {\n"
" torch.prim.If.yield %false, %0 : !torch.bool, !torch.tuple<int, int>\n"
" %11 = torch.aten.eq.int %2#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
" %12:2 = torch.prim.If %11 -> (!torch.bool, !torch.tuple<int, int>) {\n"
" %13 = torch.prim.TupleConstruct %1#1, %int6 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
" torch.prim.If.yield %true, %13 : !torch.bool, !torch.tuple<int, int>\n"
" } else {\n"
" torch.prim.If.yield %false, %0 : !torch.bool, !torch.tuple<int, int>\n"
" }\n"
" torch.prim.If.yield %12#0, %12#1 : !torch.bool, !torch.tuple<int, int>\n"
" }\n"
" torch.prim.If.yield %10#0, %10#1 : !torch.bool, !torch.tuple<int, int>\n"
" }\n"

View File

@ -410,8 +410,6 @@ FX_IMPORTER_XFAIL_SET = {
"GtIntModule_basic",
"IntFloatModule_basic",
"IntImplicitModule_basic",
"IsFloatingPointFloat_True",
"IsFloatingPointInt_False",
"LenStrModule_basic",
"MaxPool3dCeilModeTrueModule_basic",
"MaxPool3dEmptyStrideStaticModule_basic",
@ -449,7 +447,6 @@ FX_IMPORTER_XFAIL_SET = {
"ReduceMaxAlongDimUnsignedInt_basic",
"ReduceMinAlongDimUnsignedInt_basic",
"RsubInt0d_NumToTensor_Module_basic",
"ScalarConstantTupleModule_basic",
"ScalarImplicitFloatModule_basic",
"SignAndLogarithmOfDeterminantModule_F32",
"SignAndLogarithmOfDeterminantBatchedModule_F32",
@ -466,8 +463,6 @@ FX_IMPORTER_XFAIL_SET = {
"TensorToFloatZeroRank_basic",
"TensorToFloat_basic",
"ThresholdBackward2dMixedModule_basic",
"TorchPrimLoopForLikeModule_basic",
"TorchPrimLoopWhileLikeModule_basic",
"UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic",
"UpSampleNearest2dDynamicFactor_basic",
"ViewCollapseDynamicWithAtenSizeIntModule_basic",

View File

@ -2584,8 +2584,8 @@ def atenavg_pool3d〡dtype(self_rank_dtype: Tuple[int, int], kernel_size: Lis
self_rank, self_dtype = self_rank_dtype
return self_dtype
@check_dtype_function(_check_tensors_with_the_same_dtype(
tensor_shapes=[(2, 3, 5), (3,), (3,), (3,), (3,)], training=False, momentum=0.1, eps=1e-5, cudnn_enabled=True))
# @check_dtype_function(_check_tensors_with_the_same_dtype(
# tensor_shapes=[(2, 3, 5), (3,), (3,), (3,), (3,)], tensor_device="cpu", error_types={torch.complex128}, training=False, momentum=0.1, eps=1e-5, cudnn_enabled=True))
def atenbatch_norm〡dtype(input_rank_dtype: Tuple[int, int], weight_rank_dtype: Optional[Tuple[int, int]], bias_rank_dtype: Optional[Tuple[int, int]], running_mean_rank_dtype: Optional[Tuple[int, int]], running_var_rank_dtype: Optional[Tuple[int, int]], training: bool, momentum: float, eps: float, cudnn_enabled: bool) -> int:
input_rank, input_dtype = input_rank_dtype
return input_dtype
@ -2617,6 +2617,8 @@ def aten_weight_norm_interface〡dtype(v_rank_dtype: Tuple[int, int], g_rank_
return v_dtype, torch.float64
elif g_dtype == torch.complex64:
return v_dtype, torch.float32
elif g_dtype == torch.bfloat16:
return v_dtype, torch.float32
return v_dtype, g_dtype
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1))
@ -3890,7 +3892,7 @@ def atenmv〡dtype(self_rank_dtype: Tuple[int, int], vec_rank_dtype: Tuple[in
dtypes = [self_dtype, vec_dtype]
return promote_dtypes(ranks, dtypes)
@check_dtype_function(_check_two_tensor_op())
# @check_dtype_function(_check_two_tensor_op())
def atensubTensor〡dtype(self_rank_dtype: Tuple[int, int], other_rank_dtype: Tuple[int, int], alpha: Union[int, float, complex] = 1) -> int:
other_rank, other_dtype = other_rank_dtype
self_rank, self_dtype = self_rank_dtype
@ -4148,7 +4150,7 @@ def atenaddmm〡dtype(self_rank_dtype: Tuple[int, int], mat1_rank_dtype: Tupl
return promote_dtypes(ranks, dtypes)
@check_dtype_function(
_check_tensors_with_the_same_dtype(tensor_shapes=[(1, 1), (1, 1), (1, 1)]) +
# _check_tensors_with_the_same_dtype(tensor_shapes=[(1, 1), (1, 1), (1, 1)]) +
# Different width
[Invocation(TensorOfShape(4, 3, dtype=torch.float32),
TensorOfShape(4, 3, dtype=torch.float64),
@ -5203,8 +5205,7 @@ def atenScalarImplicit〡dtype(a_rank_dtype: Tuple[int, int]) -> int:
def primNumToTensorScalar〡dtype(a: Union[int, float, complex]) -> int:
return get_dtype_of_scalar(a)
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0) +
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.int32) +
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.int32) +
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.float16) +
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.complex64))
def atensoftmaxint〡dtype(self_rank_dtype: Tuple[int, int], dim: int, dtype: Optional[int] = None) -> int:
@ -5214,7 +5215,7 @@ def atensoftmaxint〡dtype(self_rank_dtype: Tuple[int, int], dim: int, dty
return dtype
@check_dtype_function(
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, half_to_float=False) +
# _check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, half_to_float=False) +
_check_tensors_with_the_same_dtype(
num_of_tensors=1,
error_types=(all_integer_dtypes() + all_complex_dtypes() + [torch.bfloat16, torch.float32, torch.float64]),
@ -5227,7 +5228,7 @@ def aten_softmax〡dtype(self_rank_dtype: Tuple[int, int], dim: int, half_to_
return self_dtype
@check_dtype_function(
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, half_to_float=False) +
# _check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, half_to_float=False) +
_check_tensors_with_the_same_dtype(
num_of_tensors=1,
error_types=(all_integer_dtypes() + all_complex_dtypes() + [torch.bfloat16, torch.float32, torch.float64]),
@ -5239,8 +5240,7 @@ def aten_log_softmax〡dtype(self_rank_dtype: Tuple[int, int], dim: int, half
return torch.float32
return self_dtype
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0) +
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.int32) +
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.int32) +
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.float16) +
_check_tensors_with_the_same_dtype(num_of_tensors=1, dim=0, dtype=torch.complex64))
def atenlog_softmaxint〡dtype(self_rank_dtype: Tuple[int, int], dim: int, dtype: Optional[int] = None) -> int:

View File

@ -1 +1 @@
b94ddab65bbb15cca98bca857b173bfc4abdb7b5
5147aeb49a367b4a338d446b604be4b65eed83f5

View File

@ -1,3 +1,3 @@
-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
-f https://download.pytorch.org/whl/nightly/cpu/torch/
--pre
torch==2.4.0.dev20240604
torch==2.5.0.dev20240718

View File

@ -102,7 +102,7 @@ def test_import_frozen_exported_program_with_dynamic_shapes():
def forward(self, x):
return torch.tanh(x)
batch = Dim("batch")
batch = Dim("batch", max=10)
dynamic_shapes = {"x": {0: batch}}
m = fx.export_and_import(
Basic(),
@ -135,7 +135,7 @@ def test_broadcast_with_dynamic_shapes():
x = torch.randn(1, 2)
y = torch.randn(10)
dim_0 = Dim("dim_0")
dim_0 = Dim("dim_0", max=10)
dynamic_shapes = {
"x": {},
"y": {0: dim_0},

View File

@ -68,7 +68,7 @@ def test_tanh_sigmoid_cat_custom_op():
dim_n = Dim("n", min=5, max=10)
dim_x1 = Dim("x1", max=100)
dim_y1 = Dim("y1", max=50)
dim_z1 = Dim("z1")
dim_z1 = Dim("z1", max=50)
dynamic_shapes = {
"x": {0: dim_n, 1: dim_x1},
"y": {0: dim_n, 1: dim_y1},

View File

@ -62,7 +62,7 @@ def test_tanh_sigmoid_cat():
dim_n = Dim("n", min=5, max=10)
dim_x1 = Dim("x1", max=100)
dim_y1 = Dim("y1", max=50)
dim_z1 = Dim("z1")
dim_z1 = Dim("z1", max=50)
dynamic_shapes = {
"x": {0: dim_n, 1: dim_x1},
"y": {0: dim_n, 1: dim_y1},
@ -148,7 +148,7 @@ def test_outer_with_squared_shape():
x = torch.rand(10)
# Dynamic dim constraints
batch = Dim("batch")
batch = Dim("batch", max=10)
dynamic_shapes = {"x": {0: batch}}
m = fx.export_and_import(
@ -163,7 +163,7 @@ def test_outer_with_squared_shape():
@run
# CHECK-LABEL: test_slice_tensor_static_output
# CHECK: func.func @main(%[[ARG0:.+]]: !torch.vtensor<[?,3],f32>) -> !torch.vtensor<[2,1],f32> {
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 3, max_val = 9223372036854775806} : !torch.int
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 3, max_val = 10} : !torch.int
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S0]]], affine_map<()[s0] -> (s0, 3)> : !torch.vtensor<[?,3],f32>
# CHECK: %[[SLICE1:.+]] = torch.aten.slice.Tensor %[[ARG0]], {{.*}}, {{.*}}, {{.*}}, {{.*}} : !torch.vtensor<[?,3],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,3],f32>
# CHECK: %[[SLICE2:.+]] = torch.aten.slice.Tensor %[[SLICE1]], {{.*}}, {{.*}}, {{.*}}, {{.*}} : !torch.vtensor<[2,3],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1],f32>
@ -180,7 +180,7 @@ def test_slice_tensor_static_output():
x = torch.randn(4, 3)
# Dynamic dim constraints
batch = Dim("batch", min=3)
batch = Dim("batch", min=3, max=10)
dynamic_shapes = {"x": {0: batch}}
m = fx.export_and_import(
@ -195,7 +195,7 @@ def test_slice_tensor_static_output():
@run
# CHECK-LABEL: test_slice_tensor_dynamic_output
# CHECK: func.func @main(%[[ARG0:.+]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],f32> {
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 5, max_val = 9223372036854775806} : !torch.int
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 5, max_val = 10} : !torch.int
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S0]]], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],f32>
# CHECK: %[[SLICE:.+]] = torch.aten.slice.Tensor %[[ARG0]], {{.*}}, {{.*}}, {{.*}}, {{.*}} : !torch.vtensor<[?],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?],f32>
# CHECK: torch.bind_symbolic_shape %[[SLICE]], [%[[S0]]], affine_map<()[s0] -> (s0 - 5)> : !torch.vtensor<[?],f32>
@ -212,7 +212,7 @@ def test_slice_tensor_dynamic_output():
x = torch.randn(10)
# Dynamic dim constraints
dimx = Dim("dimx", min=5)
dimx = Dim("dimx", min=5, max=10)
dynamic_shapes = {"x": {0: dimx}}
m = fx.export_and_import(
@ -246,7 +246,7 @@ def test_div_tensor_mixed_ranks():
y = torch.randn(2, 3)
# Dynamic dim constraints
batch = Dim("batch")
batch = Dim("batch", max=10)
dynamic_shapes = {"x": None, "y": {0: batch}}
m = fx.export_and_import(
@ -313,7 +313,7 @@ def test_broadcast_unit_dim_to_static_with_unchanged_dim_dynamic():
x = torch.randn(1, 2)
# Dynamic dim constraints
dim_1 = Dim("dim_1")
dim_1 = Dim("dim_1", max=10)
dynamic_shapes = {"x": {1: dim_1}}
m = fx.export_and_import(
@ -346,7 +346,7 @@ def test_broadcast_unit_dim_to_dynamic_with_unchanged_dim_static():
y = torch.randn(10)
# Dynamic dim constraints
dim_0 = Dim("dim_0")
dim_0 = Dim("dim_0", max=10)
dynamic_shapes = {"x": {}, "y": {0: dim_0}}
m = fx.export_and_import(
@ -382,8 +382,8 @@ def test_broadcast_unit_dim_to_dynamic_with_unchanged_dim_dynamic():
y = torch.randn(10)
# Dynamic dim constraints
dim_0 = Dim("dim_0")
dim_1 = Dim("dim_1")
dim_0 = Dim("dim_0", max=10)
dim_1 = Dim("dim_1", max=10)
dynamic_shapes = {"x": {1: dim_1}, "y": {0: dim_0}}
m = fx.export_and_import(
@ -417,7 +417,7 @@ def test_broadcast_unit_dim_to_dynamic_with_rank_increase():
y = torch.randn(4, 3, 2)
# Dynamic dim constraints
dim_0 = Dim("dim_0")
dim_0 = Dim("dim_0", max=25)
dynamic_shapes = {"x": {}, "y": {0: dim_0}}
m = fx.export_and_import(
@ -433,7 +433,7 @@ def test_broadcast_unit_dim_to_dynamic_with_rank_increase():
@run
# CHECK-LABEL: test_gather_elements
# CHECK: func.func @main(%[[ARG0:.+]]: !torch.vtensor<[?,3],f32>, %[[ARG1:.+]]: !torch.vtensor<[2,3],si64>) -> !torch.vtensor<[2,3],f32> {
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 3, max_val = 9223372036854775806} : !torch.int
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 3, max_val = 100} : !torch.int
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S0]]], affine_map<()[s0] -> (s0, 3)> : !torch.vtensor<[?,3],f32>
# CHECK: %[[GATHER:.+]] = torch.aten.gather %[[ARG0]], {{.*}}, {{.*}}, {{.*}} : !torch.vtensor<[?,3],f32>, !torch.int, !torch.vtensor<[2,3],si64>, !torch.bool -> !torch.vtensor<[2,3],f32>
# CHECK: return %[[GATHER]] : !torch.vtensor<[2,3],f32>
@ -450,7 +450,7 @@ def test_gather_elements():
y = torch.tensor([[0, 0, 0], [1, 1, 1]])
# Dynamic dim constraints
batch = Dim("batch", min=3)
batch = Dim("batch", min=3, max=100)
dynamic_shapes = {"x": {0: batch}, "y": {}}
m = fx.export_and_import(

View File

@ -42,7 +42,7 @@ def test_scalar_typed_node():
m = fx.export_and_import(
Basic(),
torch.randn(3, 4),
dynamic_shapes={"x": {0: torch.export.Dim("b")}},
dynamic_shapes={"x": {0: torch.export.Dim("b", min=3, max=10)}},
import_symbolic_shape_expressions=True,
)
print(m)

View File

@ -1,3 +1,3 @@
-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
-f https://download.pytorch.org/whl/nightly/cpu/torchvision/
--pre
torchvision==0.19.0.dev20240604
torchvision==0.20.0.dev20240718