mirror of https://github.com/llvm/torch-mlir
[ONNX] Fix onnx.ScatterElements with AtenScatterReduceTwoOp lowering to tm_tensor/linalg_ext dialect (#3754)
- Fixes the onnx.ScatterElements issue: https://github.com/nod-ai/SHARK-ModelDev/issues/823
- E2E test: https://github.com/nod-ai/SHARK-TestSuite/pull/363
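For reference, a minimal sketch of the semantics this lowering targets, in plain PyTorch (assuming PyTorch 1.13+, where `torch.scatter_reduce` is available; the tensor values are invented for illustration). ONNX `ScatterElements` with `reduction="add"` folds each update into the value already stored in `data`, which corresponds to `torch.scatter_reduce(..., include_self=True)`:

```python
import torch

# ONNX ScatterElements with reduction="add": each update is combined
# with the value already present in `data` at the target position.
data = torch.ones(1, 5)
indices = torch.tensor([[1, 3]])
updates = torch.tensor([[2.0, 4.0]])

# torch.scatter_reduce with include_self=True reproduces this: the
# original data values take part in the reduction (1+2 at column 1,
# 1+4 at column 3).
out = torch.scatter_reduce(data, 1, indices, updates,
                           reduce="sum", include_self=True)
print(out)  # tensor([[1., 3., 1., 5., 1.]])
```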
parent 53f7532e76
commit f4840ed886
@@ -635,18 +635,21 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
 
         // TODO: Implement max and min cases
         if (reduction == "mul") {
-          reduction = "multiply";
+          reduction = "prod";
         } else if (reduction == "max" || reduction == "min") {
           return rewriter.notifyMatchFailure(
               binder.op, "max/min reduction unsupported for scatter elements");
+        } else if (reduction == "add") {
+          reduction = "sum";
         }
 
         Value cstStrReduction =
             rewriter.create<Torch::ConstantStrOp>(binder.getLoc(), reduction);
-
-        rewriter.replaceOpWithNewOp<Torch::AtenScatterReduceOp>(
+        Value cstTrue =
+            rewriter.create<Torch::ConstantBoolOp>(binder.getLoc(), true);
+        rewriter.replaceOpWithNewOp<Torch::AtenScatterReduceTwoOp>(
             binder.op, resultType, data, constAxis, indices, updates,
-            cstStrReduction);
+            cstStrReduction, cstTrue);
         return success();
       });
   patterns.onOp(
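The string remapping above is needed because the two ATen ops spell their reduce modes differently: `aten.scatter.reduce` (in-place `Tensor.scatter_` with a reduce argument) takes `"add"`/`"multiply"`, while `aten.scatter_reduce.two` (`torch.scatter_reduce`) takes `"sum"`/`"prod"` plus an `include_self` flag. Numerically the two agree for the sum case; the switch matters because, per the commit title, it is the `.two` form that has the lowering path to tm_tensor/linalg_ext. A minimal sketch of the correspondence in plain PyTorch (example values invented):

```python
import torch

data = torch.ones(1, 5)
idx = torch.tensor([[1, 3]])
src = torch.tensor([[2.0, 4.0]])

# Old path, aten.scatter.reduce: Tensor.scatter_ with reduce="add".
a = data.clone()
a.scatter_(1, idx, src, reduce="add")

# New path, aten.scatter_reduce.two: torch.scatter_reduce with
# reduce="sum" and include_self=True, which keeps the original data
# values in the reduction and so matches the in-place variant above.
b = torch.scatter_reduce(data, 1, idx, src,
                         reduce="sum", include_self=True)

assert torch.equal(a, b)  # both give tensor([[1., 3., 1., 5., 1.]])
```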
@@ -3084,7 +3084,6 @@ ONNX_XFAIL_SET = {
     "ScatterReduceIntMaxModuleIncludeSelf",
     "ScatterReduceIntMinModuleIncludeSelf",
     "ScatterValueFloatModule_basic",
-    "ScatterAddStaticModule_basic",
     # Failure - onnx_lowering: onnx.ScatterND
     "IndexPut1DFloatAccumulateModule_basic",
     "IndexPut1DIntAccumulateModule_basic",
@@ -261,15 +261,16 @@ func.func @test_scatter_elements_with_axis(%arg0: !torch.vtensor<[1,5],f32>, %ar
 
 // CHECK-LABEL: func.func @test_scatter_elements_with_duplicate_indices
 func.func @test_scatter_elements_with_duplicate_indices(%arg0: !torch.vtensor<[1,5],f32>, %arg1: !torch.vtensor<[1,2],si64>, %arg2: !torch.vtensor<[1,2],f32>) -> !torch.vtensor<[1,5],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 18 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[AXIS:.*]] = torch.constant.int 1
-  // CHECK: %[[ZERO:.+]] = torch.constant.int 0
-  // CHECK: %[[ONE:.+]] = torch.constant.int 1
-  // CHECK: %[[SZ:.+]] = torch.aten.size.int %arg0, %[[AXIS]]
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Scalar %arg1, %[[SZ]], %[[ONE]]
-  // CHECK: %[[CMP:.+]] = torch.aten.lt.Scalar %arg1, %[[ZERO]]
-  // CHECK: %[[WHERE:.+]] = torch.aten.where.self %[[CMP]], %[[ADD]], %arg1
-  // CHECK: %[[STR:.*]] = torch.constant.str "add"
-  // CHECK: torch.aten.scatter.reduce %arg0, %[[AXIS]], %[[WHERE]], %arg2, %str : !torch.vtensor<[1,5],f32>, !torch.int, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],f32>, !torch.str -> !torch.vtensor<[1,5],f32>
+  // CHECK: %[[AXIS:.*]] = torch.constant.int 1
+  // CHECK: %[[ZERO:.*]] = torch.constant.int 0
+  // CHECK: %[[ONE:.*]] = torch.constant.int 1
+  // CHECK: %[[SZ:.*]] = torch.aten.size.int %arg0, %[[AXIS]] : !torch.vtensor<[1,5],f32>, !torch.int -> !torch.int
+  // CHECK: %[[ADD:.*]] = torch.aten.add.Scalar %arg1, %[[SZ]], %[[ONE]] : !torch.vtensor<[1,2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,2],si64>
+  // CHECK: %[[CMP:.*]] = torch.aten.lt.Scalar %arg1, %[[ZERO]] : !torch.vtensor<[1,2],si64>, !torch.int -> !torch.vtensor<[1,2],i1>
+  // CHECK: %[[WHERE:.*]] = torch.aten.where.self %[[CMP]], %[[ADD]], %arg1 : !torch.vtensor<[1,2],i1>, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],si64> -> !torch.vtensor<[1,2],si64>
+  // CHECK: %[[STR:.*]] = torch.constant.str "sum"
+  // CHECK: %[[TRUE:.*]] = torch.constant.bool true
+  // CHECK: torch.aten.scatter_reduce.two %arg0, %[[AXIS]], %[[WHERE]], %arg2, %[[STR]], %[[TRUE]] : !torch.vtensor<[1,5],f32>, !torch.int, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],f32>, !torch.str, !torch.bool -> !torch.vtensor<[1,5],f32>
   %0 = torch.operator "onnx.ScatterElements"(%arg0, %arg1, %arg2) {torch.onnx.axis = 1 : si64, torch.onnx.reduction = "add"} : (!torch.vtensor<[1,5],f32>, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],f32>) -> !torch.vtensor<[1,5],f32>
   return %0 : !torch.vtensor<[1,5],f32>
 }
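As a sanity check on the duplicate-indices behavior the test above exercises, a small sketch in plain PyTorch (values invented): when the same target index appears twice, both updates and, with `include_self=True`, the original element all fold into the same slot.

```python
import torch

data = torch.zeros(1, 5)
idx = torch.tensor([[1, 1]])           # same target column twice
src = torch.tensor([[2.0, 3.0]])

out = torch.scatter_reduce(data, 1, idx, src,
                           reduce="sum", include_self=True)
print(out)  # tensor([[0., 5., 0., 0., 0.]]), i.e. 0 + 2 + 3 at column 1
```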
@@ -294,15 +295,16 @@ func.func @test_scatter_elements_without_axis(%arg0: !torch.vtensor<[3,3],f32>,
 
 // CHECK-LABEL: func.func @test_scatter_elements_with_reduction_mul
 func.func @test_scatter_elements_with_reduction_mul(%arg0: !torch.vtensor<[1,5],f32>, %arg1: !torch.vtensor<[1,2],si64>, %arg2: !torch.vtensor<[1,2],f32>) -> !torch.vtensor<[1,5],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 18 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK: %[[AXIS:.*]] = torch.constant.int 1
-  // CHECK: %[[ZERO:.+]] = torch.constant.int 0
-  // CHECK: %[[ONE:.+]] = torch.constant.int 1
-  // CHECK: %[[SZ:.+]] = torch.aten.size.int %arg0, %[[AXIS]]
-  // CHECK: %[[ADD:.+]] = torch.aten.add.Scalar %arg1, %[[SZ]], %[[ONE]]
-  // CHECK: %[[CMP:.+]] = torch.aten.lt.Scalar %arg1, %[[ZERO]]
-  // CHECK: %[[WHERE:.+]] = torch.aten.where.self %[[CMP]], %[[ADD]], %arg1
-  // CHECK: %[[STR:.*]] = torch.constant.str "multiply"
-  // CHECK: torch.aten.scatter.reduce %arg0, %[[AXIS]], %[[WHERE]], %arg2, %str : !torch.vtensor<[1,5],f32>, !torch.int, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],f32>, !torch.str -> !torch.vtensor<[1,5],f32>
+  // CHECK: %[[AXIS:.*]] = torch.constant.int 1
+  // CHECK: %[[ZERO:.*]] = torch.constant.int 0
+  // CHECK: %[[ONE:.*]] = torch.constant.int 1
+  // CHECK: %[[SZ:.*]] = torch.aten.size.int %arg0, %[[AXIS]] : !torch.vtensor<[1,5],f32>, !torch.int -> !torch.int
+  // CHECK: %[[ADD:.*]] = torch.aten.add.Scalar %arg1, %[[SZ]], %[[ONE]] : !torch.vtensor<[1,2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,2],si64>
+  // CHECK: %[[CMP:.*]] = torch.aten.lt.Scalar %arg1, %[[ZERO]] : !torch.vtensor<[1,2],si64>, !torch.int -> !torch.vtensor<[1,2],i1>
+  // CHECK: %[[WHERE:.*]] = torch.aten.where.self %[[CMP]], %[[ADD]], %arg1 : !torch.vtensor<[1,2],i1>, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],si64> -> !torch.vtensor<[1,2],si64>
+  // CHECK: %[[STR:.*]] = torch.constant.str "prod"
+  // CHECK: %[[TRUE:.*]] = torch.constant.bool true
+  // CHECK: torch.aten.scatter_reduce.two %arg0, %[[AXIS]], %[[WHERE]], %arg2, %[[STR]], %[[TRUE]] : !torch.vtensor<[1,5],f32>, !torch.int, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],f32>, !torch.str, !torch.bool -> !torch.vtensor<[1,5],f32>
   %0 = torch.operator "onnx.ScatterElements"(%arg0, %arg1, %arg2) {torch.onnx.axis = 1 : si64, torch.onnx.reduction = "mul"} : (!torch.vtensor<[1,5],f32>, !torch.vtensor<[1,2],si64>, !torch.vtensor<[1,2],f32>) -> !torch.vtensor<[1,5],f32>
   return %0 : !torch.vtensor<[1,5],f32>
 }
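Similarly, for the `"mul"` to `"prod"` remap checked above, a small sketch in plain PyTorch (values invented):

```python
import torch

data = torch.full((1, 5), 2.0)
idx = torch.tensor([[1, 3]])
src = torch.tensor([[3.0, 5.0]])

# ONNX reduction="mul" becomes reduce="prod"; with include_self=True the
# existing 2.0 entries are multiplied into the result (2*3, 2*5).
out = torch.scatter_reduce(data, 1, idx, src,
                           reduce="prod", include_self=True)
print(out)  # tensor([[2., 6., 2., 10., 2.]])
```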