mirror of https://github.com/llvm/torch-mlir
Add basic cumsum. This doesn't support the exclusive and reverse attrs (#2717)
fixes #2711
pull/2722/head
parent 690827fe52
commit 1778314620
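
For context on the op being lowered: ONNX CumSum produces the running total of
the input along one axis; the exclusive attribute makes each output element the
sum of strictly earlier elements, and reverse accumulates from the end of the
axis toward the start. A minimal scalar sketch of those semantics (illustrative
only, not code from this commit):

    #include <cstddef>
    #include <vector>

    // Reference 1-D CumSum, covering the two attributes this lowering rejects.
    std::vector<double> cumsum1d(const std::vector<double> &in,
                                 bool exclusive, bool reverse) {
      std::vector<double> out(in.size());
      double acc = 0.0;
      for (size_t k = 0; k < in.size(); ++k) {
        size_t i = reverse ? in.size() - 1 - k : k;
        if (exclusive) {
          out[i] = acc;  // running total *before* in[i]
          acc += in[i];
        } else {
          acc += in[i];
          out[i] = acc;  // running total *including* in[i]
        }
      }
      return out;
    }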
@@ -836,6 +836,62 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
            binder.op, resultType, operand);
        return success();
      });
  patterns.onOp(
      "CumSum", 11, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
        Location loc = binder.getLoc();
        Torch::ValueTensorType resultType;
        Value operand;
        Value axisTensor;
        if (binder.tensorOperands(operand, axisTensor) ||
            binder.tensorResultType(resultType))
          return failure();

        // Reject configurations we cannot lower: fail if either attribute
        // does not bind, or if it binds to a nonzero value, since the
        // exclusive and reverse modes are not implemented yet.
        int64_t exclusive;
        int64_t reverse;
        if (binder.s64IntegerAttr(exclusive, "exclusive", 0) ||
            exclusive != 0)
          return rewriter.notifyMatchFailure(
              binder.op, "unsupported onnx.CumSum conversion: exclusive");
        if (binder.s64IntegerAttr(reverse, "reverse", 0) || reverse != 0)
          return rewriter.notifyMatchFailure(
              binder.op, "unsupported onnx.CumSum conversion: reverse");
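        // Note: OpBinder::s64IntegerAttr binds the supplied default (0 here)
        // and succeeds when the attribute is absent, failing only when the
        // attribute is present with an unexpected type, so the attribute-free
        // ONNX form falls through to the lowering below.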

        // Normalize a negative axis: if (axis < 0) axis += rank.
        int64_t rank =
            cast<Torch::ValueTensorType>(operand.getType()).getSizes().size();
        Value rankVal = rewriter.create<Torch::ConstantIntOp>(
            loc, rewriter.getI64IntegerAttr(rank));
        Value zero = rewriter.create<Torch::ConstantIntOp>(
            loc, rewriter.getI64IntegerAttr(0));

        // Extract the axis as a !torch.int, compute (axis < 0) as a 0/1
        // integer, and use it to conditionally add rank.
        Value axisScalar = rewriter.create<Torch::AtenItemOp>(
            loc, rewriter.getType<Torch::IntType>(), axisTensor);
        Value isNegative =
            rewriter.create<Torch::AtenLtIntOp>(loc, axisScalar, zero);
        isNegative = rewriter.create<Torch::AtenIntBoolOp>(loc, isNegative);
        Value finalOffset = rewriter.create<Torch::AtenMulIntOp>(
            loc, isNegative, rankVal);
        Value dim = rewriter.create<Torch::AtenAddIntOp>(
            loc, axisScalar, finalOffset);

        auto resultTensorType = cast<Torch::BaseTensorType>(resultType);
        if (!resultTensorType.hasDtype()) {
          return rewriter.notifyMatchFailure(
              binder.op, "expected result type to have a dtype");
        }
        // AtenCumsumOp carries the accumulation dtype as an integer scalar;
        // derive it from the result tensor's dtype.
        Value resultDType = Torch::getDtypeIntValueForType(
            rewriter, loc, resultTensorType.getDtype());

        rewriter.replaceOpWithNewOp<Torch::AtenCumsumOp>(
            binder.op, resultType, operand, dim, resultDType);
        return success();
      });
  patterns.onOp("Div", 14,
                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                  Torch::ValueTensorType resultType;
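
The chain emitted above (AtenItemOp, AtenLtIntOp, AtenIntBoolOp, AtenMulIntOp,
AtenAddIntOp) is a dynamic-value spelling of ordinary negative-axis
normalization. As plain scalar arithmetic (a hypothetical standalone helper,
not part of the patch):

    #include <cstdint>

    // (axis < 0) materializes as 0 or 1, so the multiply adds rank
    // exactly when the axis is negative.
    int64_t normalizeAxis(int64_t axis, int64_t rank) {
      int64_t isNegative = (axis < 0) ? 1 : 0; // AtenLtIntOp + AtenIntBoolOp
      return axis + isNegative * rank;         // AtenMulIntOp + AtenAddIntOp
    }

    // e.g. normalizeAxis(-1, 2) == 1 and normalizeAxis(1, 2) == 1.
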
@@ -37,3 +37,12 @@ func.func @reduce_mean_operation(%arg0: !torch.vtensor<[1,64,768],f32>)
  %211 = torch.operator "onnx.ReduceMean"(%arg0) {torch.onnx.axes = [-1 : si64]} : (!torch.vtensor<[1,64,768],f32>) -> !torch.vtensor<[1,64,1],f32>
  return %211 : !torch.vtensor<[1,64,1],f32>
}

// -----
func.func @cumsum_operation(%arg0: !torch.vtensor<[2,3],f64>,
                            %arg1: !torch.vtensor<[],si32>)
    -> !torch.vtensor<[2,3],f64> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
  %212 = torch.operator "onnx.CumSum"(%arg0, %arg1) : (!torch.vtensor<[2,3],f64>, !torch.vtensor<[],si32>) -> !torch.vtensor<[2,3],f64>
  return %212 : !torch.vtensor<[2,3],f64>
}
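
As a sanity check on the semantics the lowering must preserve for the test's
2x3/f64 operand (input values invented for illustration; the lit test checks
only the conversion, not numerics):

    #include <cstdio>

    int main() {
      double in[2][3] = {{1, 2, 3}, {4, 5, 6}};
      double axis0[2][3], axis1[2][3];
      // axis 0: accumulate down each column.
      for (int j = 0; j < 3; ++j) {
        axis0[0][j] = in[0][j];
        axis0[1][j] = in[0][j] + in[1][j];
      }
      // axis 1: accumulate across each row.
      for (int i = 0; i < 2; ++i) {
        double acc = 0;
        for (int j = 0; j < 3; ++j)
          axis1[i][j] = (acc += in[i][j]);
      }
      // axis0 == {{1,2,3},{5,7,9}}, axis1 == {{1,3,6},{4,9,15}}
      std::printf("%g %g\n", axis0[1][2], axis1[1][2]); // prints: 9 15
    }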