Fix error message.

RefineTypes doesn't handle shape refinement anymore.
pull/693/head snapshot-20220407.375
Sean Silva 2022-04-07 21:28:06 +00:00
parent 1d5b5a89e8
commit e7721fb784
2 changed files with 4 additions and 4 deletions
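Context for reviewers: the updated note points people at shape_lib_gen.py instead of RefineTypes. For illustration only, below is a rough sketch of the kind of shape transfer function that script collects. The mangled function name, the broadcast helper, and the standalone test harness are assumptions made for this sketch, not code taken from this commit or from the real shape library.

# Hypothetical sketch of a shape transfer function in the style expected
# by shape_lib_gen.py: operand shapes come in as List[int], the result
# shape goes out as List[int]. Names and helpers are illustrative only.
from typing import List

def _broadcast(lhs: List[int], rhs: List[int]) -> List[int]:
    # Standard right-aligned broadcasting of two ranked shapes.
    result: List[int] = []
    for i in range(max(len(lhs), len(rhs))):
        a = lhs[-1 - i] if i < len(lhs) else 1
        b = rhs[-1 - i] if i < len(rhs) else 1
        assert a == b or a == 1 or b == 1, "shapes are not broadcastable"
        result.insert(0, max(a, b))
    return result

# A shape transfer function for torch.aten.mul.Tensor would just
# broadcast the two operand shapes.
def aten_mul_Tensor(self: List[int], other: List[int]) -> List[int]:
    return _broadcast(self, other)

if __name__ == "__main__":
    print(aten_mul_Tensor([2, 1, 4], [3, 1]))  # [2, 3, 4]

In the real shape library the op-name mangling and helper module differ; the point is only that the diagnostic now directs contributors to add such a shape transfer function rather than to extend RefineTypes.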


@@ -30,8 +30,8 @@ static LogicalResult checkValueInvariants(Operation *errorReportOp, Value v) {
         .append("unsupported by backend lowering: tensor with unknown rank "
                 "or dtype")
         .attachNote()
-        .append("this is likely due to a missing case in RefineTypes or a "
-                "missing shape transfer function in shape_lib_gen.py");
+        .append("this is likely due to a missing shape transfer function in "
+                "shape_lib_gen.py");
   }
   return success();
 }


@@ -4,7 +4,7 @@
 func @unknown_rank(%arg0: !torch.vtensor<[],f32>) {
   // expected-error@+2 {{unsupported by backend lowering: tensor with unknown rank or dtype}}
-  // expected-note@+1 {{this is likely due to a missing case in RefineTypes}}
+  // expected-note@+1 {{this is likely due to a missing shape transfer function in shape_lib_gen.py}}
   %0 = torch.aten.mul.Tensor %arg0, %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<*,f32>
   return
 }
@@ -13,7 +13,7 @@ func @unknown_rank(%arg0: !torch.vtensor<[],f32>) {
 func @unknown_dtype(%arg0: !torch.vtensor<[],f32>) {
   // expected-error@+2 {{unsupported by backend lowering: tensor with unknown rank or dtype}}
-  // expected-note@+1 {{this is likely due to a missing case in RefineTypes}}
+  // expected-note@+1 {{this is likely due to a missing shape transfer function in shape_lib_gen.py}}
   %0 = torch.aten.mul.Tensor %arg0, %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[],unk>
   return
 }