mirror of https://github.com/llvm/torch-mlir
[MLIR][ONNX] Add OnnxToTorch support for AveragePool op. (#2672)
This commit adds the OnnxToTorch support for AveragePool op. Signed-Off By: vivekkhandelwal1424@gmail.com (pull/2674/head)
parent
698ff3a736
commit
8649b84e3f
|
@ -9,6 +9,7 @@
|
|||
|
||||
#include "torch-mlir/Conversion/TorchOnnxToTorch/Patterns.h"
|
||||
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
|
||||
#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
|
||||
|
||||
using namespace mlir;
|
||||
using namespace mlir::torch;
|
||||
|
@ -164,6 +165,115 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
|
|||
binder.op, resultType, operand);
|
||||
return success();
|
||||
});
|
||||
patterns.onOp(
    "AveragePool", 19,
    [](OpBinder binder, ConversionPatternRewriter &rewriter) {
      // Lowers onnx.AveragePool (opset 19) to torch.aten.avg_pool{1,2,3}d,
      // selected by input rank (3/4/5 -> 1d/2d/3d).
      std::string autoPad;
      SmallVector<int64_t> dilation;
      if (binder.customOpNameStringAttr(autoPad, "auto_pad", "NOTSET"))
        return failure();
      if (autoPad != "NOTSET") {
        // TODO: Add support for `auto_pad` != "NOTSET"
        return rewriter.notifyMatchFailure(
            binder.op, "unsupported conversion: auto_pad != NOTSET");
      }
      if (binder.s64IntegerArrayAttr(dilation, "dilations", {}))
        return failure();
      if (!dilation.empty()) {
        // torch.aten.avg_pool* has no dilation operand.
        return rewriter.notifyMatchFailure(
            binder.op, "dilation is not supported by torch.aten.avgpool op");
      }

      Torch::ValueTensorType resultType;
      Value operand;
      bool ceilMode, countIncludePad;
      if (binder.tensorOperand(operand) ||
          binder.s64BoolAttr(ceilMode, "ceil_mode", false) ||
          binder.s64BoolAttr(countIncludePad, "count_include_pad", false) ||
          binder.tensorResultType(resultType))
        return failure();
      // Determine the rank of input tensor.
      std::optional<unsigned> maybeRank = Torch::getTensorRank(operand);
      if (!maybeRank)
        return rewriter.notifyMatchFailure(binder.op,
                                           "Unimplemented: unranked tensor");
      unsigned rank = *maybeRank;
      // Guard before computing `rank - 2` on an unsigned: ONNX pooling
      // inputs are (N, C, D1, ..., Dk), so rank must exceed 2.
      if (rank <= 2)
        return rewriter.notifyMatchFailure(
            binder.op, "expected input tensor to have spatial axes");
      unsigned spatialRank = rank - 2;

      SmallVector<int64_t> kernel, padding, strides;
      if (binder.s64IntegerArrayAttr(kernel, "kernel_shape", {}))
        return failure();
      if (kernel.size() != spatialRank) {
        return rewriter.notifyMatchFailure(
            binder.op, "kernel list size does not match the number of axes");
      }
      if (binder.s64IntegerArrayAttr(padding, "pads", {0}))
        return failure();
      // Per the ONNX spec, `pads` holds 2 * <spatial rank> entries:
      // [x1_begin, ..., xk_begin, x1_end, ..., xk_end]. Torch pooling ops
      // take a single (symmetric) value per axis, so fold a symmetric
      // begin/end list down to its first half and reject asymmetric pads.
      if (padding.size() == 2 * spatialRank) {
        for (unsigned i = 0; i < spatialRank; i++) {
          if (padding[i] != padding[i + spatialRank])
            return rewriter.notifyMatchFailure(
                binder.op, "unsupported conversion: asymmetric padding");
        }
        padding.resize(spatialRank);
      }
      if (padding.size() != 1 && padding.size() != spatialRank) {
        return rewriter.notifyMatchFailure(
            binder.op, "padding list size does not match the number of axes");
      }
      if (binder.s64IntegerArrayAttr(strides, "strides", {1}))
        return failure();
      if (strides.size() != 1 && strides.size() != spatialRank) {
        return rewriter.notifyMatchFailure(
            binder.op, "strides list size does not match the number of axes");
      }

      // Materialize each integer attribute list as a !torch.list<int>.
      auto makeIntList = [&](ArrayRef<int64_t> ints) -> Value {
        SmallVector<Value> cstInts;
        for (int64_t i : ints)
          cstInts.push_back(rewriter.create<Torch::ConstantIntOp>(
              binder.getLoc(), rewriter.getI64IntegerAttr(i)));
        return rewriter.create<Torch::PrimListConstructOp>(
            binder.getLoc(),
            Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
            cstInts);
      };
      Value kernelSizeList = makeIntList(kernel);
      Value paddingList = makeIntList(padding);
      Value stridesList = makeIntList(strides);
      Value cstCeilMode =
          rewriter.create<Torch::ConstantBoolOp>(binder.getLoc(), ceilMode);
      Value cstCountIncludePad = rewriter.create<Torch::ConstantBoolOp>(
          binder.getLoc(), countIncludePad);
      // divisor_override operand of avg_pool2d/3d; ONNX has no equivalent.
      Value cstNone = rewriter.create<Torch::ConstantNoneOp>(binder.getLoc());

      if (rank == 3) {
        rewriter.replaceOpWithNewOp<Torch::AtenAvgPool1dOp>(
            binder.op, resultType, operand, kernelSizeList, stridesList,
            paddingList, cstCeilMode, cstCountIncludePad);
        return success();
      } else if (rank == 4) {
        rewriter.replaceOpWithNewOp<Torch::AtenAvgPool2dOp>(
            binder.op, resultType, operand, kernelSizeList, stridesList,
            paddingList, cstCeilMode, cstCountIncludePad,
            /*divisor_override=*/cstNone);
        return success();
      } else if (rank == 5) {
        rewriter.replaceOpWithNewOp<Torch::AtenAvgPool3dOp>(
            binder.op, resultType, operand, kernelSizeList, stridesList,
            paddingList, cstCeilMode, cstCountIncludePad,
            /*divisor_override=*/cstNone);
        return success();
      }
      return failure();
    });
|
||||
patterns.onOp(
|
||||
"BitShift", 11, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
|
||||
Torch::ValueTensorType resultType;
|
||||
|
|
|
@ -434,3 +434,24 @@ func.func @test_floor(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4
|
|||
%0 = torch.operator "onnx.Floor"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
|
||||
return %0 : !torch.vtensor<[3,4,5],f32>
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_averagepool_1d_default
// Rank-3 input -> expects lowering to torch.aten.avg_pool1d.
// Attributes: kernel_shape=[2], count_include_pad=1; ceil_mode, strides,
// and pads are left at their defaults.
func.func @test_averagepool_1d_default(%arg0: !torch.vtensor<[1,3,32],f32>) -> !torch.vtensor<[1,3,31],f32> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 19 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
// CHECK: torch.aten.avg_pool1d %arg0, %0, %2, %1, %false, %true : !torch.vtensor<[1,3,32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool -> !torch.vtensor<[1,3,31],f32>
%0 = torch.operator "onnx.AveragePool"(%arg0) {torch.onnx.kernel_shape = [2 : si64], torch.onnx.count_include_pad = 1 : si64} : (!torch.vtensor<[1,3,32],f32>) -> !torch.vtensor<[1,3,31],f32>
return %0 : !torch.vtensor<[1,3,31],f32>
}
|
||||
|
||||
// CHECK-LABEL: @test_averagepool_2d_ceil
// Rank-4 input -> expects lowering to torch.aten.avg_pool2d (note the extra
// !torch.none divisor_override operand vs. avg_pool1d).
// Attributes: ceil_mode=1, kernel_shape=[3,3], strides=[2,2].
func.func @test_averagepool_2d_ceil(%arg0: !torch.vtensor<[1,1,4,4],f32>) -> !torch.vtensor<[1,1,2,2],f32> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 19 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
// CHECK: torch.aten.avg_pool2d %arg0, %0, %2, %1, %true, %false, %none : !torch.vtensor<[1,1,4,4],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,2,2],f32>
%0 = torch.operator "onnx.AveragePool"(%arg0) {torch.onnx.ceil_mode = 1 : si64, torch.onnx.kernel_shape = [3 : si64, 3 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,1,4,4],f32>) -> !torch.vtensor<[1,1,2,2],f32>
return %0 : !torch.vtensor<[1,1,2,2],f32>
}
|
||||
|
||||
// CHECK-LABEL: @test_averagepool_3d_default
// Rank-5 input -> expects lowering to torch.aten.avg_pool3d.
// Attributes: kernel_shape=[2,2,2]; everything else defaulted.
func.func @test_averagepool_3d_default(%arg0: !torch.vtensor<[1,3,32,32,32],f32>) -> !torch.vtensor<[1,3,31,31,31],f32> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 19 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
// CHECK: torch.aten.avg_pool3d %arg0, %0, %2, %1, %false, %false_2, %none : !torch.vtensor<[1,3,32,32,32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,3,31,31,31],f32>
%0 = torch.operator "onnx.AveragePool"(%arg0) {torch.onnx.kernel_shape = [2 : si64, 2 : si64, 2 : si64]} : (!torch.vtensor<[1,3,32,32,32],f32>) -> !torch.vtensor<[1,3,31,31,31],f32>
return %0 : !torch.vtensor<[1,3,31,31,31],f32>
}
|
||||
|
|
Loading…
Reference in New Issue