[onnx] Fix lowering `onnx.Shrink` to Torch (#3603)

This fixes the result type of the `torch.aten.lt.Scalar` and
`torch.aten.gt.Scalar` ops created during the lowering of `onnx.Shrink`
to Torch.
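
For reference, comparison ops such as `torch.aten.lt.Scalar` and `torch.aten.gt.Scalar` produce boolean tensors, so the lowering now builds a dedicated result type that keeps the input's shape but uses an i1 element type instead of reusing the floating-point input type. A minimal sketch of that construction, mirroring the hunk below (`rewriter` and `inputType` are the names already used in the lowering):

// Same shape as the input, but with an i1 element type for comparison results.
Torch::ValueTensorType comparisonResultType =
    rewriter.getType<Torch::ValueTensorType>(
        ArrayRef<int64_t>(inputType.getSizes()), rewriter.getI1Type());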
pull/3607/head
Marius Brehler 2024-08-07 21:25:14 +02:00 committed by GitHub
parent 18139994e8
commit 341f415b1e
2 changed files with 14 additions and 10 deletions

@@ -3229,6 +3229,10 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
return rewriter.notifyMatchFailure(
binder.op, "unimplemented: non-floating point dtype");
+ Torch::ValueTensorType comparisonResultType =
+ rewriter.getType<Torch::ValueTensorType>(
+ ArrayRef<int64_t>(inputType.getSizes()), rewriter.getI1Type());
// The formula of this operator is: If x < -lambd, y = x + bias; If x >
// lambd, y = x - bias; Otherwise, y = 0.
// The implementation is based on the following algorithm:
@@ -3261,13 +3265,13 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
loc, rewriter.getFloatAttr(rewriter.getF64Type(), -lambd));
Value inputLTNegLambd = rewriter.create<Torch::AtenLtScalarOp>(
- loc, inputType, input, constNegLambd);
+ loc, comparisonResultType, input, constNegLambd);
Value inputPlusBias = rewriter.create<Torch::AtenAddScalarOp>(
loc, inputType, input, constBias, /*alpha=*/constOne);
Value inputSubBias = rewriter.create<Torch::AtenSubScalarOp>(
loc, inputType, input, constBias, /*alpha=*/constOne);
Value inputGTLambd = rewriter.create<Torch::AtenGtScalarOp>(
- loc, inputType, input, constLambd);
+ loc, comparisonResultType, input, constLambd);
Value inputSubBiasOrZero =
rewriter.create<Torch::AtenWhereScalarOtherOp>(
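
As the comment in the first hunk above notes, Shrink computes y = x + bias when x < -lambd, y = x - bias when x > lambd, and y = 0 otherwise. A small standalone C++ sketch of this reference semantics, using the lambd = bias = 1.5 attributes from the first test case below (the helper name is illustrative, not part of the patch):

#include <cassert>

// Illustrative per-element reference for onnx.Shrink semantics.
static float shrinkRef(float x, float lambd, float bias) {
  if (x < -lambd)
    return x + bias; // left tail: shift up by bias
  if (x > lambd)
    return x - bias; // right tail: shift down by bias
  return 0.0f;       // values in [-lambd, lambd] are zeroed
}

int main() {
  assert(shrinkRef(-2.0f, 1.5f, 1.5f) == -0.5f);
  assert(shrinkRef(2.0f, 1.5f, 1.5f) == 0.5f);
  assert(shrinkRef(1.0f, 1.5f, 1.5f) == 0.0f);
  return 0;
}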

@@ -2377,12 +2377,12 @@ func.func @Shrink(%arg0: !torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32> att
// CHECK: %float0.000000e00 = torch.constant.float 0.000000e+00
// CHECK: %float1.000000e00 = torch.constant.float 1.000000e+00
// CHECK: %float-1.500000e00 = torch.constant.float -1.500000e+00
- // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+ // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
// CHECK: %1 = torch.aten.add.Scalar %arg0, %float1.500000e00_0, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %2 = torch.aten.sub.Scalar %arg0, %float1.500000e00_0, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
- // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
- // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
- // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
+ // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
+ // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+ // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
// CHECK: return %5 : !torch.vtensor<[5],f32>
%0 = torch.operator "onnx.Shrink"(%arg0) {torch.onnx.bias = 1.500000e+00 : f32, torch.onnx.lambd = 1.500000e+00 : f32} : (!torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32>
return %0 : !torch.vtensor<[5],f32>
@@ -2397,12 +2397,12 @@ func.func @test_shrink_hard(%arg0: !torch.vtensor<[5],f32>) -> !torch.vtensor<[5
// CHECK: %float0.000000e00_0 = torch.constant.float 0.000000e+00
// CHECK: %float1.000000e00 = torch.constant.float 1.000000e+00
// CHECK: %float-1.500000e00 = torch.constant.float -1.500000e+00
- // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+ // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
// CHECK: %1 = torch.aten.add.Scalar %arg0, %float0.000000e00, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %2 = torch.aten.sub.Scalar %arg0, %float0.000000e00, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
- // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
- // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00_0 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
- // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
+ // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
+ // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00_0 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+ // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
// CHECK: return %5 : !torch.vtensor<[5],f32>
%0 = torch.operator "onnx.Shrink"(%arg0) {torch.onnx.lambd = 1.500000e+00 : f32} : (!torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32>
return %0 : !torch.vtensor<[5],f32>