mirror of https://github.com/llvm/torch-mlir
OnnxToTorch lower celu op (#2920)
parent 5ecc1d5c0d
commit 6fa21bd8b1
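This commit adds an OnnxToTorch lowering for the ONNX Celu op (opset 12 and later). Celu is decomposed into existing Torch ops following its definition, celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)), with alpha taken from the op's "alpha" attribute (default 1.0).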
@@ -601,6 +601,47 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
             binder.op, resultType, operand);
         return success();
       });
+  patterns.onOp(
+      "Celu", 12, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+        Torch::ValueTensorType resultType;
+        Value operand;
+        float alpha;
+        if (binder.tensorOperand(operand) ||
+            binder.tensorResultType(resultType) ||
+            binder.f32FloatAttr(alpha, "alpha", 1.0f))
+          return failure();
+        // exp(x/alpha)
+        Value constAlpha = rewriter.create<Torch::ConstantFloatOp>(
+            binder.getLoc(), rewriter.getType<Torch::FloatType>(),
+            rewriter.getF64FloatAttr(alpha));
+        Value xDivAlpha = rewriter.create<Torch::AtenDivScalarOp>(
+            binder.getLoc(), resultType, operand, constAlpha);
+        Value expXDivAlpha = rewriter.create<Torch::AtenExpOp>(
+            binder.getLoc(), resultType, xDivAlpha);
+        // alpha * (exp(x/alpha) - 1)
+        Value constantOne = rewriter.create<Torch::ConstantIntOp>(
+            binder.getLoc(), rewriter.getI64IntegerAttr(1));
+        Value subOne = rewriter.create<Torch::AtenSubScalarOp>(
+            binder.getLoc(), resultType, expXDivAlpha, constantOne,
+            constantOne);
+        Value mulAlpha = rewriter.create<Torch::AtenMulScalarOp>(
+            binder.getLoc(), resultType, subOne, constAlpha);
+        Value constantZero = rewriter.create<Torch::ConstantIntOp>(
+            binder.getLoc(), rewriter.getI64IntegerAttr(0));
+        Value zeroTensor = createRank0Tensor(rewriter, binder.getLoc(),
+                                             resultType, constantZero);
+        // min(0, alpha * (exp(x/alpha) - 1))
+        Value minExpression = rewriter.create<Torch::AtenMinimumOp>(
+            binder.getLoc(), resultType, zeroTensor, mulAlpha);
+
+        // max(0, x)
+        Value maxExpression = rewriter.create<Torch::AtenMaximumOp>(
+            binder.getLoc(), resultType, zeroTensor, operand);
+        // max(0, x) + min(0, alpha * (exp(x/alpha) - 1))
+        rewriter.replaceOpWithNewOp<Torch::AtenAddTensorOp>(
+            binder.op, resultType, maxExpression, minExpression, constantOne);
+        return success();
+      });
   patterns.onOp(
       "Clip", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
         // https://onnx.ai/onnx/operators/onnx__Clip.html
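As a quick sanity check of the decomposition above, here is a minimal standalone C++ sketch of the same formula. It is not part of the patch; celuRef and the sample inputs are invented for illustration.

#include <algorithm>        // std::max, std::min
#include <cmath>            // std::exp
#include <cstdio>           // std::printf
#include <initializer_list>

// celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)),
// mirroring the Torch op sequence emitted by the lowering above.
float celuRef(float x, float alpha) {
  return std::max(0.0f, x) +
         std::min(0.0f, alpha * (std::exp(x / alpha) - 1.0f));
}

int main() {
  const float alpha = 2.0f; // same alpha as the lit test below
  for (float x : {-3.0f, -0.5f, 0.0f, 0.5f, 3.0f})
    std::printf("celu(%+.1f) = %+f\n", x, celuRef(x, alpha));
  return 0;
}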
@@ -1645,3 +1645,23 @@ func.func @test_constant_of_shape_dense_int_cst() -> !torch.vtensor<[2,3,4], si6
   %0 = "torch.operator"(%cst) <{name = "onnx.ConstantOfShape"}> {torch.onnx.value = dense<3> : tensor<1xsi64>}: (!torch.vtensor<[3], si64>) -> !torch.vtensor<[2,3,4], si64>
   return %0 : !torch.vtensor<[2,3,4], si64>
 }
+
+// CHECK-LABEL: func.func @test_celu
+func.func @test_celu(%arg0: !torch.vtensor<[3,3,3,1],f32>) -> !torch.vtensor<[3,3,3,1],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 12 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: %[[ALPHA:.*]] = torch.constant.float 2.000000e+00
+  // CHECK: %0 = torch.aten.div.Scalar %arg0, %[[ALPHA]] : !torch.vtensor<[3,3,3,1],f32>, !torch.float -> !torch.vtensor<[3,3,3,1],f32>
+  // CHECK: %1 = torch.aten.exp %0 : !torch.vtensor<[3,3,3,1],f32> -> !torch.vtensor<[3,3,3,1],f32>
+  // CHECK: %int1 = torch.constant.int 1
+  // CHECK: %2 = torch.aten.sub.Scalar %1, %int1, %int1 : !torch.vtensor<[3,3,3,1],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,3,3,1],f32>
+  // CHECK: %3 = torch.aten.mul.Scalar %2, %[[ALPHA]] : !torch.vtensor<[3,3,3,1],f32>, !torch.float -> !torch.vtensor<[3,3,3,1],f32>
+  // CHECK: %int0 = torch.constant.int 0
+  // CHECK: %4 = torch.prim.ListConstruct : () -> !torch.list<int>
+  // CHECK: %none = torch.constant.none
+  // CHECK: %int6 = torch.constant.int 6
+  // CHECK: %[[ZERO:.*]] = torch.aten.full %4, %int0, %int6, %none, %none, %none : !torch.list<int>, !torch.int, !torch.int, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[],f32>
+  // CHECK: %[[MIN:.*]] = torch.aten.minimum %[[ZERO]], %3 : !torch.vtensor<[],f32>, !torch.vtensor<[3,3,3,1],f32> -> !torch.vtensor<[3,3,3,1],f32>
+  // CHECK: %[[MAX:.*]] = torch.aten.maximum %[[ZERO]], %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[3,3,3,1],f32> -> !torch.vtensor<[3,3,3,1],f32>
+  // CHECK: %8 = torch.aten.add.Tensor %[[MAX]], %[[MIN]], %int1 : !torch.vtensor<[3,3,3,1],f32>, !torch.vtensor<[3,3,3,1],f32>, !torch.int -> !torch.vtensor<[3,3,3,1],f32>
+  %0 = torch.operator "onnx.Celu"(%arg0) {torch.onnx.alpha = 2.000000e+00 : f32} : (!torch.vtensor<[3,3,3,1],f32>) -> !torch.vtensor<[3,3,3,1],f32>
+  return %0 : !torch.vtensor<[3,3,3,1],f32>
+}
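For reference, tests in this directory are typically driven by a lit RUN line such as the one sketched below; the exact flags are an assumption, not part of this diff.

// RUN: torch-mlir-opt <%s -convert-torch-onnx-to-torch -split-input-file -verify-diagnostics | FileCheck %s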