diff --git a/lib/Dialect/TorchConversion/Transforms/VerifyInvariantsBeforeBackendLowering.cpp b/lib/Dialect/TorchConversion/Transforms/VerifyInvariantsBeforeBackendLowering.cpp
index aba7115ab..5e0406ee5 100644
--- a/lib/Dialect/TorchConversion/Transforms/VerifyInvariantsBeforeBackendLowering.cpp
+++ b/lib/Dialect/TorchConversion/Transforms/VerifyInvariantsBeforeBackendLowering.cpp
@@ -30,8 +30,8 @@ static LogicalResult checkValueInvariants(Operation *errorReportOp, Value v) {
         .append("unsupported by backend lowering: tensor with unknown rank "
                 "or dtype")
         .attachNote()
-        .append("this is likely due to a missing case in RefineTypes or a "
-                "missing shape transfer function in shape_lib_gen.py");
+        .append("this is likely due to a missing shape transfer function in "
+                "shape_lib_gen.py");
   }
   return success();
 }
diff --git a/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir b/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir
index 740e875c4..3b9a8a244 100644
--- a/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir
+++ b/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir
@@ -4,7 +4,7 @@
 
 func @unknown_rank(%arg0: !torch.vtensor<[],f32>) {
   // expected-error@+2 {{unsupported by backend lowering: tensor with unknown rank or dtype}}
-  // expected-note@+1 {{this is likely due to a missing case in RefineTypes}}
+  // expected-note@+1 {{this is likely due to a missing shape transfer function in shape_lib_gen.py}}
   %0 = torch.aten.mul.Tensor %arg0, %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<*,f32>
   return
 }
@@ -13,7 +13,7 @@
 
 func @unknown_dtype(%arg0: !torch.vtensor<[],f32>) {
   // expected-error@+2 {{unsupported by backend lowering: tensor with unknown rank or dtype}}
-  // expected-note@+1 {{this is likely due to a missing case in RefineTypes}}
+  // expected-note@+1 {{this is likely due to a missing shape transfer function in shape_lib_gen.py}}
   %0 = torch.aten.mul.Tensor %arg0, %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[],unk>
   return
 }