build: manually update PyTorch version (#3863)

This commit sets the PyTorch and TorchVision versions to the nightly
release from 2024-11-07.

This commit also updates the dtype checks for the
`aten.fake_quantize_per_tensor_affine` and
`aten.fake_quantize_per_tensor_affine_cachemask` ops, since these ops
now support bfloat16 input.

Signed-off-by: Vivek Khandelwal <vivekkhandelwal1424@gmail.com>
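As context for the relaxed check, here is a minimal, hypothetical repro sketch (not part of this commit); the tensor shape and quantization parameters are illustrative:

import torch

# Assumes torch==2.6.0.dev20241107, the nightly pinned by this commit.
# Per the commit description, bfloat16 input is now supported, so this call
# should no longer be rejected on dtype grounds.
x = torch.randn(4, dtype=torch.bfloat16, device="cpu")
y = torch.fake_quantize_per_tensor_affine(
    x, scale=0.1, zero_point=0, quant_min=0, quant_max=255
)
print(y.dtype)  # expected: torch.bfloat16 (output dtype matches the input)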
Vivek Khandelwal 2024-11-11 21:26:56 +05:30 committed by GitHub
parent 8eb34dae78
commit 17c1985c4d
6 changed files with 8 additions and 34 deletions


@@ -11247,7 +11247,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 " return %0#1 : !torch.int\n"
 " }\n"
 " func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.float, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int) -> !torch.int {\n"
-" %int15 = torch.constant.int 15\n"
 " %none = torch.constant.none\n"
 " %str = torch.constant.str \"AssertionError: \"\n"
 " %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
@@ -11258,13 +11257,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 " torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
 " torch.prim.If.yield\n"
 " }\n"
-" %2 = torch.aten.ne.int %0#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
-" torch.prim.If %2 -> () {\n"
-" torch.prim.If.yield\n"
-" } else {\n"
-" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
-" torch.prim.If.yield\n"
-" }\n"
 " return %0#1 : !torch.int\n"
 " }\n"
 " func.func @__torch__.torch_mlir.jit_ir_importer.build_tools.library_generator.is_float_dtype(%arg0: !torch.int) -> !torch.bool {\n"
@@ -11282,7 +11274,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 " }\n"
 " func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine_cachemask\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.float, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int) -> !torch.tuple<int, int> {\n"
 " %int11 = torch.constant.int 11\n"
-" %int15 = torch.constant.int 15\n"
 " %none = torch.constant.none\n"
 " %str = torch.constant.str \"AssertionError: \"\n"
 " %int1 = torch.constant.int 1\n"
@@ -11294,16 +11285,9 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 " torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
 " torch.prim.If.yield\n"
 " }\n"
-" %2 = torch.aten.ne.int %0#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
-" torch.prim.If %2 -> () {\n"
-" torch.prim.If.yield\n"
-" } else {\n"
-" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
-" torch.prim.If.yield\n"
-" }\n"
-" %3 = torch.prim.TupleIndex %arg0, %int1 : !torch.tuple<int, int>, !torch.int -> !torch.int\n"
-" %4 = torch.prim.TupleConstruct %3, %int11 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
-" return %4 : !torch.tuple<int, int>\n"
+" %2 = torch.prim.TupleIndex %arg0, %int1 : !torch.tuple<int, int>, !torch.int -> !torch.int\n"
+" %3 = torch.prim.TupleConstruct %2, %int11 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
+" return %3 : !torch.tuple<int, int>\n"
 " }\n"
 " func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine.tensor_qparams\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.tuple<int, int>, %arg3: !torch.int, %arg4: !torch.int) -> !torch.int {\n"
 " %int15 = torch.constant.int 15\n"


@@ -403,7 +403,6 @@ FX_IMPORTER_XFAIL_SET = {
     "QuantizedReluInt32_basic",
     "QuantizedReluInt8_basic",
     "QuantizedReluUint8_basic",
-    "AtenSubFloatModule_basic",
     "BincountMinlengthModule_basic",
     "BincountModule_basic",
     "BincountStaticSizeModule_basic",
@@ -431,20 +430,16 @@ FX_IMPORTER_XFAIL_SET = {
     "ElementwiseQuantizePerTensorModule_basic",
     "ElementwiseQuantizePerTensorUIntModule_basic",
     "ElementwiseToDtypeI64ToUI8Module_basic",
-    "EqIntModule_basic",
     "FloatImplicitModule_basic",
     "GeFloatIntModule_basic",
-    "GeFloatModule_basic",
     "GeIntModule_basic",
     "GtFloatIntModule_basic",
-    "GtIntModule_basic",
     "IntFloatModule_basic",
     "IntImplicitModule_basic",
     "LenStrModule_basic",
     "MulFloatModule_basic",
     "NativeGroupNormBackwardModule_basic",
     "NeFloatIntModule_basic",
-    "NeIntModule_basic",
     "NllLossModuleBackward1DMeanWeight_basic",
     "NllLossModuleBackward1DMean_basic",
     "NllLossModuleBackward1DSumWeight_basic",
@@ -472,7 +467,6 @@ FX_IMPORTER_XFAIL_SET = {
     "SortIntList_basic",
     "SplitDimDynamicModule_basic",
     "SplitDimStaticModule_basic",
-    "SqrtIntConstantModule_basic",
     "SqrtIntModule_basic",
     "SubFloatModule_basic",
     "TensorToBoolZeroRank_basic",
@@ -653,7 +647,6 @@ FX_IMPORTER_STABLEHLO_XFAIL_SET = {
     "AtenMmQuint8_basic",
     "AtenRealView128Module_basic",
     "AtenRealView64Module_basic",
-    "AtenSubFloatModule_basic",
     "AtenTopKModule_basic",
     "AtenTopKSmallestModule_basic",
     "Aten_EmbeddingBagExample_basic",
@@ -878,7 +871,6 @@ FX_IMPORTER_STABLEHLO_XFAIL_SET = {
     "SortTensor_basic",
     "SplitDimDynamicModule_basic",
     "SplitDimStaticModule_basic",
-    "SqrtIntConstantModule_basic",
     "SqrtIntModule_basic",
     "SubFloatModule_basic",
     "TModuleRank0_basic",


@@ -2580,19 +2580,17 @@ def prims〇split_dim〡dtype(a_rank_dtype: Tuple[int, int], dim: int, outer_len
     return a_dtype

 # note: fake_quantize_per_tensor_affine doesn't support "meta" device, use "cpu" instead.
-@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.bfloat16, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
+@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
 def aten〇fake_quantize_per_tensor_affine〡dtype(self_rank_dtype: Tuple[int, int], scale: float, zero_point: int, quant_min: int, quant_max: int) -> int:
     self_rank, self_dtype = self_rank_dtype
     assert is_float_dtype(self_dtype)
-    assert self_dtype != torch.bfloat16
     return self_dtype

 # note: fake_quantize_per_tensor_affine doesn't support "meta" device, use "cpu" instead.
-@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.bfloat16, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
+@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
 def aten〇fake_quantize_per_tensor_affine_cachemask〡dtype(self_rank_dtype: Tuple[int, int], scale: float, zero_point: int, quant_min: int, quant_max: int) -> Tuple[int, int]:
     self_rank, self_dtype = self_rank_dtype
     assert is_float_dtype(self_dtype)
-    assert self_dtype != torch.bfloat16
     return (self_rank_dtype[1], torch.bool)

 # note: fake_quantize_per_tensor_affine.tensor_qparams doesn't support "meta" device, use "cpu" instead.
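In effect, the dtype functions above now treat bfloat16 like any other float dtype. A standalone sketch of the relaxed check follows; FLOAT_DTYPES is an illustrative stand-in for the library's is_float_dtype helper, not the real implementation:

import torch

# Stand-in for is_float_dtype(); the real helper lives in the library generator.
FLOAT_DTYPES = {torch.float16, torch.bfloat16, torch.float32, torch.float64}

def fake_quantize_per_tensor_affine_dtype(self_rank_dtype, scale, zero_point,
                                          quant_min, quant_max):
    self_rank, self_dtype = self_rank_dtype
    # Only "is this a float dtype?" is asserted now; the extra
    # `self_dtype != torch.bfloat16` assert was dropped by this commit.
    assert self_dtype in FLOAT_DTYPES
    return self_dtype

# bfloat16 input: previously rejected, now simply propagated as the result dtype.
print(fake_quantize_per_tensor_affine_dtype((2, torch.bfloat16), 0.1, 0, 0, 255))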


@@ -1 +1 @@
-c787213d413e85c66bdad0d8c9cde1c5ced34b1b
+0d5247caf3ffd618d31cf4cf880c47b7dbd323a7


@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torch/
 --pre
-torch==2.6.0.dev20241029
+torch==2.6.0.dev20241107


@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torchvision/
 --pre
-torchvision==0.20.0.dev20241029
+torchvision==0.20.0.dev20241107