diff --git a/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp b/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
index 624c4b48c..0e4d7c40a 100644
--- a/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
+++ b/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
@@ -11247,7 +11247,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    return %0#1 : !torch.int\n"
 "  }\n"
 "  func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.float, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int) -> !torch.int {\n"
-"    %int15 = torch.constant.int 15\n"
 "    %none = torch.constant.none\n"
 "    %str = torch.constant.str \"AssertionError: \"\n"
 "    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
@@ -11258,13 +11257,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "      torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
 "      torch.prim.If.yield\n"
 "    }\n"
-"    %2 = torch.aten.ne.int %0#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
-"    torch.prim.If %2 -> () {\n"
-"      torch.prim.If.yield\n"
-"    } else {\n"
-"      torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
-"      torch.prim.If.yield\n"
-"    }\n"
 "    return %0#1 : !torch.int\n"
 "  }\n"
 "  func.func @__torch__.torch_mlir.jit_ir_importer.build_tools.library_generator.is_float_dtype(%arg0: !torch.int) -> !torch.bool {\n"
@@ -11282,7 +11274,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "  }\n"
 "  func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine_cachemask\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.float, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int) -> !torch.tuple<int, int> {\n"
 "    %int11 = torch.constant.int 11\n"
-"    %int15 = torch.constant.int 15\n"
 "    %none = torch.constant.none\n"
 "    %str = torch.constant.str \"AssertionError: \"\n"
 "    %int1 = torch.constant.int 1\n"
@@ -11294,16 +11285,9 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "      torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
 "      torch.prim.If.yield\n"
 "    }\n"
-"    %2 = torch.aten.ne.int %0#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
-"    torch.prim.If %2 -> () {\n"
-"      torch.prim.If.yield\n"
-"    } else {\n"
-"      torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
-"      torch.prim.If.yield\n"
-"    }\n"
-"    %3 = torch.prim.TupleIndex %arg0, %int1 : !torch.tuple<int, int>, !torch.int -> !torch.int\n"
-"    %4 = torch.prim.TupleConstruct %3, %int11 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
-"    return %4 : !torch.tuple<int, int>\n"
+"    %2 = torch.prim.TupleIndex %arg0, %int1 : !torch.tuple<int, int>, !torch.int -> !torch.int\n"
+"    %3 = torch.prim.TupleConstruct %2, %int11 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
+"    return %3 : !torch.tuple<int, int>\n"
 "  }\n"
 "  func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine.tensor_qparams\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.tuple<int, int>, %arg3: !torch.int, %arg4: !torch.int) -> !torch.int {\n"
 "    %int15 = torch.constant.int 15\n"
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index 2acc3afe5..8c38d0112 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -403,7 +403,6 @@ FX_IMPORTER_XFAIL_SET = {
     "QuantizedReluInt32_basic",
     "QuantizedReluInt8_basic",
     "QuantizedReluUint8_basic",
-    "AtenSubFloatModule_basic",
     "BincountMinlengthModule_basic",
     "BincountModule_basic",
     "BincountStaticSizeModule_basic",
@@ -431,20 +430,16 @@ FX_IMPORTER_XFAIL_SET = {
     "ElementwiseQuantizePerTensorModule_basic",
     "ElementwiseQuantizePerTensorUIntModule_basic",
     "ElementwiseToDtypeI64ToUI8Module_basic",
-    "EqIntModule_basic",
     "FloatImplicitModule_basic",
     "GeFloatIntModule_basic",
-    "GeFloatModule_basic",
     "GeIntModule_basic",
     "GtFloatIntModule_basic",
-    "GtIntModule_basic",
     "IntFloatModule_basic",
     "IntImplicitModule_basic",
     "LenStrModule_basic",
     "MulFloatModule_basic",
     "NativeGroupNormBackwardModule_basic",
     "NeFloatIntModule_basic",
-    "NeIntModule_basic",
     "NllLossModuleBackward1DMeanWeight_basic",
     "NllLossModuleBackward1DMean_basic",
     "NllLossModuleBackward1DSumWeight_basic",
@@ -472,7 +467,6 @@ FX_IMPORTER_XFAIL_SET = {
     "SortIntList_basic",
     "SplitDimDynamicModule_basic",
     "SplitDimStaticModule_basic",
-    "SqrtIntConstantModule_basic",
     "SqrtIntModule_basic",
     "SubFloatModule_basic",
     "TensorToBoolZeroRank_basic",
@@ -653,7 +647,6 @@ FX_IMPORTER_STABLEHLO_XFAIL_SET = {
     "AtenMmQuint8_basic",
     "AtenRealView128Module_basic",
     "AtenRealView64Module_basic",
-    "AtenSubFloatModule_basic",
     "AtenTopKModule_basic",
     "AtenTopKSmallestModule_basic",
     "Aten_EmbeddingBagExample_basic",
@@ -878,7 +871,6 @@ FX_IMPORTER_STABLEHLO_XFAIL_SET = {
     "SortTensor_basic",
     "SplitDimDynamicModule_basic",
     "SplitDimStaticModule_basic",
-    "SqrtIntConstantModule_basic",
     "SqrtIntModule_basic",
     "SubFloatModule_basic",
     "TModuleRank0_basic",
diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py
index 0dac7b3d5..12b1f8c76 100644
--- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py
+++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py
@@ -2580,19 +2580,17 @@ def prims〇split_dim〡dtype(a_rank_dtype: Tuple[int, int], dim: int, outer_len
     return a_dtype
 
 # note: fake_quantize_per_tensor_affine doesn't support "meta" device, use "cpu" instead.
-@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.bfloat16, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
+@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
 def aten〇fake_quantize_per_tensor_affine〡dtype(self_rank_dtype: Tuple[int, int], scale: float, zero_point: int, quant_min: int, quant_max: int) -> int:
     self_rank, self_dtype = self_rank_dtype
     assert is_float_dtype(self_dtype)
-    assert self_dtype != torch.bfloat16
     return self_dtype
 
 # note: fake_quantize_per_tensor_affine doesn't support "meta" device, use "cpu" instead.
-@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.bfloat16, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
+@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
 def aten〇fake_quantize_per_tensor_affine_cachemask〡dtype(self_rank_dtype: Tuple[int, int], scale: float, zero_point: int, quant_min: int, quant_max: int) -> Tuple[int, int]:
     self_rank, self_dtype = self_rank_dtype
     assert is_float_dtype(self_dtype)
-    assert self_dtype != torch.bfloat16
     return (self_rank_dtype[1], torch.bool)
 
 # note: fake_quantize_per_tensor_affine.tensor_qparams doesn't support "meta" device, use "cpu" instead.
diff --git a/pytorch-hash.txt b/pytorch-hash.txt
index dd4f3a19a..ad873201d 100644
--- a/pytorch-hash.txt
+++ b/pytorch-hash.txt
@@ -1 +1 @@
-c787213d413e85c66bdad0d8c9cde1c5ced34b1b
+0d5247caf3ffd618d31cf4cf880c47b7dbd323a7
diff --git a/pytorch-requirements.txt b/pytorch-requirements.txt
index 960ca904e..c18413eac 100644
--- a/pytorch-requirements.txt
+++ b/pytorch-requirements.txt
@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torch/
 --pre
-torch==2.6.0.dev20241029
+torch==2.6.0.dev20241107
diff --git a/torchvision-requirements.txt b/torchvision-requirements.txt
index 901fbd3d9..8c8d45bea 100644
--- a/torchvision-requirements.txt
+++ b/torchvision-requirements.txt
@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torchvision/
 --pre
-torchvision==0.20.0.dev20241029
+torchvision==0.20.0.dev20241107
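The dropped bfloat16 assertions track the pinned PyTorch nightly, in which fake_quantize_per_tensor_affine accepts bfloat16 inputs. A minimal sketch of the behavior the relaxed dtype functions now model, assuming a PyTorch build new enough to support bf16 for this op (e.g. the torch==2.6.0.dev20241107 pin above):

    import torch

    # Quantize-dequantize round trip on a bf16 tensor; the result keeps the
    # input dtype, which is what the dtype functions above now report.
    x = torch.randn(4, dtype=torch.bfloat16)
    y = torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
    assert y.dtype == torch.bfloat16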