mirror of https://github.com/llvm/torch-mlir
Revert hyperbolic trigonometric decompositions (#3271)
We should be using the `torch` path and handling decomposition in the `math` dialect.

Branch: pull/3285/head
Parent: 67d6a665a4
Commit: 321b844df7
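This revert drops the op-by-op log/exp decompositions for onnx.Asinh, onnx.Atanh, onnx.Acosh, onnx.Cosh, and onnx.Sinh from the ONNX importer and binds each operator directly to its torch counterpart. For reference, the identities the removed code implemented can be sanity-checked against <cmath>; the following standalone C++ sketch is illustrative only and not part of this patch:

// identity_check.cpp -- illustrative only, not part of this patch.
// Verifies the identities implemented by the reverted decompositions.
#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  const double eps = 1e-12;
  for (double x : {0.1, 0.5, 0.9}) {
    // asinh(x) = log(x + sqrt(x^2 + 1))
    assert(std::fabs(std::asinh(x) - std::log(x + std::sqrt(x * x + 1.0))) < eps);
    // atanh(x) = 1/2 * log((1 + x) / (1 - x)), valid for |x| < 1
    assert(std::fabs(std::atanh(x) - 0.5 * std::log((1.0 + x) / (1.0 - x))) < eps);
    // acosh(y) = log(y + sqrt(y^2 - 1)), valid for y >= 1
    const double y = 1.0 + x;
    assert(std::fabs(std::acosh(y) - std::log(y + std::sqrt(y * y - 1.0))) < eps);
    // cosh(x) = (exp(x) + exp(-x)) / 2 and sinh(x) = (exp(x) - exp(-x)) / 2
    assert(std::fabs(std::cosh(x) - (std::exp(x) + std::exp(-x)) / 2.0) < eps);
    assert(std::fabs(std::sinh(x) - (std::exp(x) - std::exp(-x)) / 2.0) < eps);
  }
  std::puts("all identities hold");
  return 0;
}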
@@ -300,29 +300,17 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
                       binder.op, resultType, operand);
                   return success();
                 });
-  patterns.onOp(
-      "Asinh", 9, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
-        Torch::ValueTensorType resultType;
-        Value operand;
-        if (binder.tensorOperand(operand) ||
-            binder.tensorResultType(resultType))
-          return failure();
-
-        // log(x + sqrt(x**2 + 1))
-        Value square = rewriter.create<Torch::AtenSquareOp>(
-            binder.getLoc(), resultType, operand);
-        Value cstOne = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(1));
-        Value add0 = rewriter.create<Torch::AtenAddScalarOp>(
-            binder.getLoc(), resultType, square, cstOne, cstOne);
-        Value sqrt = rewriter.create<Torch::AtenSqrtOp>(binder.getLoc(),
-                                                        resultType, add0);
-        Value add1 = rewriter.create<Torch::AtenAddTensorOp>(
-            binder.getLoc(), resultType, operand, sqrt, cstOne);
-        rewriter.replaceOpWithNewOp<Torch::AtenLogOp>(binder.op, resultType,
-                                                      add1);
-        return success();
-      });
+  patterns.onOp("Asinh", 9,
+                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+                  Torch::ValueTensorType resultType;
+                  Value operand;
+                  if (binder.tensorOperand(operand) ||
+                      binder.tensorResultType(resultType))
+                    return failure();
+                  rewriter.replaceOpWithNewOp<Torch::AtenAsinhOp>(
+                      binder.op, resultType, operand);
+                  return success();
+                });
   patterns.onOp("Atan", 7,
                 [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                   Torch::ValueTensorType resultType;
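Note on the removed decomposition: the trailing cstOne passed to Torch::AtenAddScalarOp and Torch::AtenAddTensorOp is the alpha operand from PyTorch's add(self, other, alpha) convention (self + alpha * other), so the extra constant 1 makes these plain additions.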
@@ -334,33 +322,17 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
                       binder.op, resultType, operand);
                   return success();
                 });
-  patterns.onOp(
-      "Atanh", 9, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
-        Torch::ValueTensorType resultType;
-        Value operand;
-        if (binder.tensorOperand(operand) ||
-            binder.tensorResultType(resultType))
-          return failure();
-
-        // 1/2 * log((1 + x) / (1 - x))
-        Value cstOne = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(1));
-        Value add = rewriter.create<Torch::AtenAddScalarOp>(
-            binder.getLoc(), resultType, operand, cstOne, cstOne);
-        Value neg = rewriter.create<Torch::AtenNegOp>(binder.getLoc(),
-                                                      resultType, operand);
-        Value sub = rewriter.create<Torch::AtenAddScalarOp>(
-            binder.getLoc(), resultType, neg, cstOne, cstOne);
-        Value div = rewriter.create<Torch::AtenDivTensorOp>(
-            binder.getLoc(), resultType, add, sub);
-        Value log =
-            rewriter.create<Torch::AtenLogOp>(binder.getLoc(), resultType, div);
-        Value cstTwo = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(2));
-        rewriter.replaceOpWithNewOp<Torch::AtenDivScalarOp>(
-            binder.op, resultType, log, cstTwo);
-        return success();
-      });
+  patterns.onOp("Atanh", 9,
+                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+                  Torch::ValueTensorType resultType;
+                  Value operand;
+                  if (binder.tensorOperand(operand) ||
+                      binder.tensorResultType(resultType))
+                    return failure();
+                  rewriter.replaceOpWithNewOp<Torch::AtenAtanhOp>(
+                      binder.op, resultType, operand);
+                  return success();
+                });
   patterns.onOp("Acos", 7,
                 [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                   Torch::ValueTensorType resultType;
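Note on the removed Atanh lowering: the denominator (1 - x) is assembled as neg(x) followed by an add.Scalar of 1, and the leading 1/2 of the identity is applied last as a div.Scalar by the constant 2.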
@@ -372,29 +344,17 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
                       binder.op, resultType, operand);
                   return success();
                 });
-  patterns.onOp(
-      "Acosh", 9, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
-        Torch::ValueTensorType resultType;
-        Value operand;
-        if (binder.tensorOperand(operand) ||
-            binder.tensorResultType(resultType))
-          return failure();
-
-        // log(x + sqrt(x**2 - 1))
-        Value square = rewriter.create<Torch::AtenSquareOp>(
-            binder.getLoc(), resultType, operand);
-        Value cstOne = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(1));
-        Value sub = rewriter.create<Torch::AtenSubScalarOp>(
-            binder.getLoc(), resultType, square, cstOne, cstOne);
-        Value sqrt = rewriter.create<Torch::AtenSqrtOp>(binder.getLoc(),
-                                                        resultType, sub);
-        Value add = rewriter.create<Torch::AtenAddTensorOp>(
-            binder.getLoc(), resultType, operand, sqrt, cstOne);
-        rewriter.replaceOpWithNewOp<Torch::AtenLogOp>(binder.op, resultType,
-                                                      add);
-        return success();
-      });
+  patterns.onOp("Acosh", 9,
+                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+                  Torch::ValueTensorType resultType;
+                  Value operand;
+                  if (binder.tensorOperand(operand) ||
+                      binder.tensorResultType(resultType))
+                    return failure();
+                  rewriter.replaceOpWithNewOp<Torch::AtenAcoshOp>(
+                      binder.op, resultType, operand);
+                  return success();
+                });
   patterns.onOp("BatchNormalization", 15,
                 [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                   Torch::ValueTensorType resultType;
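Note: log(x + sqrt(x^2 - 1)) is real-valued only for x >= 1; below that, the inner sqrt yields NaN under IEEE arithmetic, which is also what a dedicated acosh evaluation returns for out-of-domain inputs.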
@@ -1490,31 +1450,17 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
                       binder.op, resultType, operand);
                   return success();
                 });
-  patterns.onOp(
-      "Cosh", 9, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
-        Torch::ValueTensorType resultType;
-        Value operand;
-        if (binder.tensorOperand(operand) ||
-            binder.tensorResultType(resultType))
-          return failure();
-
-        // 1/2 * (exp(x) + exp(-x))
-        Value x = rewriter.create<Torch::AtenExpOp>(binder.getLoc(), resultType,
-                                                    operand);
-        Value neg = rewriter.create<Torch::AtenNegOp>(binder.getLoc(),
-                                                      resultType, operand);
-        Value y =
-            rewriter.create<Torch::AtenExpOp>(binder.getLoc(), resultType, neg);
-        Value cstOne = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(1));
-        Value z = rewriter.create<Torch::AtenAddTensorOp>(
-            binder.getLoc(), resultType, x, y, cstOne);
-        Value cstTwo = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(2));
-        rewriter.replaceOpWithNewOp<Torch::AtenDivScalarOp>(
-            binder.op, resultType, z, cstTwo);
-        return success();
-      });
+  patterns.onOp("Cosh", 9,
+                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+                  Torch::ValueTensorType resultType;
+                  Value operand;
+                  if (binder.tensorOperand(operand) ||
+                      binder.tensorResultType(resultType))
+                    return failure();
+                  rewriter.replaceOpWithNewOp<Torch::AtenCoshOp>(
+                      binder.op, resultType, operand);
+                  return success();
+                });
   patterns.onOp(
       "CumSum", 11, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
         Location loc = binder.getLoc();
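Note: the removed Cosh pattern is the same exp(x)/exp(-x) scaffolding as the Sinh pattern reverted below, differing only in using AtenAddTensorOp instead of AtenSubTensorOp before the final division by 2.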
@@ -1439,31 +1439,18 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
         return success();
       });
-
-  patterns.onOp(
-      "Sinh", 9, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
-        Torch::ValueTensorType resultType;
-        Value operand;
-        if (binder.tensorOperand(operand) ||
-            binder.tensorResultType(resultType))
-          return failure();
+  patterns.onOp("Sinh", 9,
+                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+                  Torch::ValueTensorType resultType;
+                  Value operand;
+                  if (binder.tensorOperand(operand) ||
+                      binder.tensorResultType(resultType))
+                    return failure();
-
-        // 1/2 * (exp(x) - exp(-x))
-        Value x = rewriter.create<Torch::AtenExpOp>(binder.getLoc(), resultType,
-                                                    operand);
-        Value neg = rewriter.create<Torch::AtenNegOp>(binder.getLoc(),
-                                                      resultType, operand);
-        Value y =
-            rewriter.create<Torch::AtenExpOp>(binder.getLoc(), resultType, neg);
-        Value cstOne = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(1));
-        Value z = rewriter.create<Torch::AtenSubTensorOp>(
-            binder.getLoc(), resultType, x, y, cstOne);
-        Value cstTwo = rewriter.create<Torch::ConstantIntOp>(
-            binder.getLoc(), rewriter.getI64IntegerAttr(2));
-        rewriter.replaceOpWithNewOp<Torch::AtenDivScalarOp>(
-            binder.op, resultType, z, cstTwo);
-        return success();
-      });
+                  rewriter.replaceOpWithNewOp<Torch::AtenSinhOp>(
+                      binder.op, resultType, operand);
+                  return success();
+                });
 
   // split with fixed-size parts
   // Arguments:
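One numerical argument for deferring to a dedicated kernel rather than the exp-based expansion above: for small |x|, exp(x) - exp(-x) subtracts two values close to 1.0 and cancels away several significant digits. A standalone C++ illustration (not part of this patch):

// sinh_cancellation.cpp -- illustrative only, not part of this patch.
#include <cmath>
#include <cstdio>

int main() {
  const double x = 1e-9;
  // Both exponentials are ~1.0 here, so their difference retains far
  // fewer significant digits than either input had.
  const double decomposed = (std::exp(x) - std::exp(-x)) / 2.0;
  const double reference = std::sinh(x); // accurate to full precision
  std::printf("decomposed = %.17g\nreference  = %.17g\nrel. error = %.3g\n",
              decomposed, reference,
              std::fabs(decomposed - reference) / reference);
  return 0;
}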
@@ -201,14 +201,7 @@ func.func @test_atan(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,
 
 // CHECK-LABEL: @test_atanh
 func.func @test_atanh(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[C1:.*]] = torch.constant.int 1
-  // CHECK: %[[ADD:.*]] = torch.aten.add.Scalar %arg0, %[[C1]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[NEG:.*]] = torch.aten.neg %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[SUB:.*]] = torch.aten.add.Scalar %[[NEG]], %[[C1]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[DIV:.*]] = torch.aten.div.Tensor %[[ADD]], %[[SUB]] : !torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[LOG:.*]] = torch.aten.log %[[DIV]] : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[C2:.*]] = torch.constant.int 2
-  // CHECK: torch.aten.div.Scalar %[[LOG]], %[[C2]] : !torch.vtensor<[3,4,5],f32>, !torch.int -> !torch.vtensor<[3,4,5],f32>
+  // CHECK: torch.aten.atanh %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
   %0 = torch.operator "onnx.Atanh"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
   return %0 : !torch.vtensor<[3,4,5],f32>
 }
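This and the remaining test updates follow the same shape: the CHECK lines that matched the emitted decomposition sequence are replaced by a single CHECK for the direct torch.aten.* op, with FileCheck matching lines in order after each CHECK-LABEL anchor.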
@@ -672,13 +665,7 @@ func.func @test_cos(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5
 
 // CHECK-LABEL: @test_cosh_example
 func.func @test_cosh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[X:.+]] = torch.aten.exp %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[NEG:.+]] = torch.aten.neg %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[Y:.+]] = torch.aten.exp %[[NEG]] : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Tensor %[[X]], %[[Y]], %[[C1]] : !torch.vtensor<[3],f32>, !torch.vtensor<[3],f32>, !torch.int -> !torch.vtensor<[3],f32>
-  // CHECK: %[[C2:.+]] = torch.constant.int 2
-  // CHECK: torch.aten.div.Scalar %[[ADD]], %[[C2]] : !torch.vtensor<[3],f32>, !torch.int -> !torch.vtensor<[3],f32>
+  // CHECK: torch.aten.cosh %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
   %0 = torch.operator "onnx.Cosh"(%arg0) : (!torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32>
   return %0 : !torch.vtensor<[3],f32>
 }
@@ -687,13 +674,7 @@ func.func @test_cosh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[
 
 // CHECK-LABEL: @test_cosh
 func.func @test_cosh(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[X:.+]] = torch.aten.exp %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[NEG:.+]] = torch.aten.neg %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[Y:.+]] = torch.aten.exp %[[NEG]] : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Tensor %[[X]], %[[Y]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],f32>, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[C2:.+]] = torch.constant.int 2
-  // CHECK: torch.aten.div.Scalar %[[ADD]], %[[C2]] : !torch.vtensor<[3,4,5],f32>, !torch.int -> !torch.vtensor<[3,4,5],f32>
+  // CHECK: torch.aten.cosh %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
   %0 = torch.operator "onnx.Cosh"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
   return %0 : !torch.vtensor<[3,4,5],f32>
 }
@@ -702,12 +683,7 @@ func.func @test_cosh(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,
 
 // CHECK-LABEL: @test_acosh_example
 func.func @test_acosh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[SQUARE:.+]] = torch.aten.square %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[SUB:.+]] = torch.aten.sub.Scalar %[[SQUARE]], %[[C1]], %[[C1]] : !torch.vtensor<[3],f32>, !torch.int, !torch.int -> !torch.vtensor<[3],f32>
-  // CHECK: %[[SQRT:.+]] = torch.aten.sqrt %[[SUB]] : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Tensor %arg0, %[[SQRT]], %[[C1]] : !torch.vtensor<[3],f32>, !torch.vtensor<[3],f32>, !torch.int -> !torch.vtensor<[3],f32>
-  // CHECK: torch.aten.log %[[ADD]] : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
+  // CHECK: torch.aten.acosh %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
   %0 = torch.operator "onnx.Acosh"(%arg0) : (!torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32>
   return %0 : !torch.vtensor<[3],f32>
 }
@@ -716,12 +692,7 @@ func.func @test_acosh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<
 
 // CHECK-LABEL: @test_acosh
 func.func @test_acosh(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[SQUARE:.+]] = torch.aten.square %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[SUB:.+]] = torch.aten.sub.Scalar %[[SQUARE]], %[[C1]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[SQRT:.+]] = torch.aten.sqrt %[[SUB]] : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Tensor %arg0, %[[SQRT]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],f32>, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: torch.aten.log %[[ADD]] : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
+  // CHECK: torch.aten.acosh %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
   %0 = torch.operator "onnx.Acosh"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
   return %0 : !torch.vtensor<[3,4,5],f32>
 }
@@ -748,12 +719,7 @@ func.func @test_asin(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,
 
 // CHECK-LABEL: @test_asinh_example
 func.func @test_asinh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[SQUARE:.+]] = torch.aten.square %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Scalar %[[SQUARE]], %[[C1]], %[[C1]] : !torch.vtensor<[3],f32>, !torch.int, !torch.int -> !torch.vtensor<[3],f32>
-  // CHECK: %[[SQRT:.+]] = torch.aten.sqrt %[[ADD]] : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[ADD_0:.+]] = torch.aten.add.Tensor %arg0, %[[SQRT]], %[[C1]] : !torch.vtensor<[3],f32>, !torch.vtensor<[3],f32>, !torch.int -> !torch.vtensor<[3],f32>
-  // CHECK: torch.aten.log %[[ADD_0]] : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
+  // CHECK: torch.aten.asinh %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
   %0 = torch.operator "onnx.Asinh"(%arg0) : (!torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32>
   return %0 : !torch.vtensor<[3],f32>
 }
@@ -762,12 +728,7 @@ func.func @test_asinh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<
 
 // CHECK-LABEL: @test_asinh
 func.func @test_asinh(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[SQUARE:.+]] = torch.aten.square %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Scalar %[[SQUARE]], %[[C1]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[SQRT:.+]] = torch.aten.sqrt %[[ADD]] : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: %[[ADD_0:.+]] = torch.aten.add.Tensor %arg0, %[[SQRT]], %[[C1]] : !torch.vtensor<[3,4,5],f32>, !torch.vtensor<[3,4,5],f32>, !torch.int -> !torch.vtensor<[3,4,5],f32>
-  // CHECK: torch.aten.log %[[ADD_0]] : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
+  // CHECK: torch.aten.asinh %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>
   %0 = torch.operator "onnx.Asinh"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
   return %0 : !torch.vtensor<[3,4,5],f32>
 }
@@ -1341,15 +1341,9 @@ func.func @test_reduce_prod_keepdims_random(%arg0: !torch.vtensor<[3,2,2],f32>,
 
 // -----
 
-// CHECK-LABEL: func.func @test_sinh_example
+// CHECK-LABEL: func.func @test_sinh
 func.func @test_sinh_example(%arg0: !torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32> attributes {torch.onnx_meta.ir_version = 4 : si64, torch.onnx_meta.opset_version = 9 : si64} {
-  // CHECK: %[[X:.+]] = torch.aten.exp %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[NEG:.+]] = torch.aten.neg %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[Y:.+]] = torch.aten.exp %[[NEG]] : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
-  // CHECK: %[[C1:.+]] = torch.constant.int 1
-  // CHECK: %[[SUB:.+]] = torch.aten.sub.Tensor %[[X]], %[[Y]], %[[C1]] : !torch.vtensor<[3],f32>, !torch.vtensor<[3],f32>, !torch.int -> !torch.vtensor<[3],f32>
-  // CHECK: %[[C2:.+]] = torch.constant.int 2
-  // CHECK: torch.aten.div.Scalar %[[SUB]], %[[C2]] : !torch.vtensor<[3],f32>, !torch.int -> !torch.vtensor<[3],f32>
+  // CHECK: torch.aten.sinh %arg0 : !torch.vtensor<[3],f32> -> !torch.vtensor<[3],f32>
   %0 = torch.operator "onnx.Sinh"(%arg0) : (!torch.vtensor<[3],f32>) -> !torch.vtensor<[3],f32>
   return %0 : !torch.vtensor<[3],f32>
 }
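Note: the @test_sinh hunk also changes the CHECK-LABEL from @test_sinh_example to @test_sinh; since FileCheck matches the label pattern as a substring of the line, it still anchors on the func.func @test_sinh_example signature.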