From cc06391630dc6d2f189787389873a6310212fba6 Mon Sep 17 00:00:00 2001
From: "Xida Ren (Cedar)"
Date: Tue, 6 Feb 2024 16:12:12 -0500
Subject: [PATCH] AtenSortOp Folder (#2864)

A chunk split off from
https://github.com/llvm/torch-mlir/pull/2856 and
https://github.com/llvm/torch-mlir/pull/2860.

Adds a folder for aten.sort: when the sort dimension (or every
dimension) of the input has size 1, sorting is a no-op, so the op
folds to its input plus an all-zero indices tensor.

---------

Co-authored-by: Xida Ren
Co-authored-by: Rob Suderman
---
 .../Dialect/Torch/IR/GeneratedTorchOps.td |  1 +
 lib/Dialect/Torch/IR/TorchOps.cpp         | 46 +++++++++++++++++++
 .../build_tools/torch_ods_gen.py          |  2 +-
 test/Dialect/Torch/canonicalize.mlir      | 29 ++++++++++++
 4 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
index fad589576..7f0f5af7e 100644
--- a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
+++ b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -12559,6 +12559,7 @@ def Torch_AtenSortOp : Torch_Op<"aten.sort", [
       printDefaultTorchOp(printer, *this, 3, 2);
     }
   }];
+  let hasFolder = 1;
 }
 
 def Torch_AtenSplitTensorOp : Torch_Op<"aten.split.Tensor", [
diff --git a/lib/Dialect/Torch/IR/TorchOps.cpp b/lib/Dialect/Torch/IR/TorchOps.cpp
index c557d2595..1857aff4d 100644
--- a/lib/Dialect/Torch/IR/TorchOps.cpp
+++ b/lib/Dialect/Torch/IR/TorchOps.cpp
@@ -1710,6 +1710,52 @@ void AtenSortIntOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
   });
 }
 
+//===----------------------------------------------------------------------===//
+// AtenSortOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult AtenSortOp::fold(FoldAdaptor adaptor,
+                               SmallVectorImpl<OpFoldResult> &results) {
+  auto operand = getSelf();
+  auto operandType = dyn_cast<BaseTensorType>(operand.getType());
+  if (!operandType || !operandType.hasSizes())
+    return failure();
+
+  // Only ValueTensorType has toBuiltinTensor.
+  auto indicesTensorType = dyn_cast<ValueTensorType>(getResult(1).getType());
+  if (!indicesTensorType)
+    return failure();
+
+  if (!indicesTensorType.hasDtype())
+    return failure();
+  auto indicesType =
+      indicesTensorType.toBuiltinTensor().clone(indicesTensorType.getDtype());
+  if (!indicesType || !indicesType.hasStaticShape())
+    return failure();
+
+  // A constant dim is only needed for the unary-dim fold below; the
+  // all-unit-dims fold applies even when dim is unknown.
+  bool unaryDim = false;
+  IntegerAttr dimAttribute = dyn_cast_if_present<IntegerAttr>(adaptor.getDim());
+  if (dimAttribute) {
+    int64_t dimInt = dimAttribute.getValue().getSExtValue();
+    if (dimInt < 0)
+      dimInt += operandType.getSizes().size();
+    unaryDim = operandType.getSizes()[dimInt] == 1;
+  }
+
+  OpBuilder builder(getContext());
+  if (unaryDim || llvm::all_of(operandType.getSizes(),
+                               [](int64_t dim) { return dim == 1; })) {
+    results.push_back(operand);
+    results.push_back(DenseElementsAttr::get(
+        indicesType, builder.getZeroAttr(indicesType.getElementType())));
+    return success();
+  }
+
+  return failure();
+}
+
 //===----------------------------------------------------------------------===//
 // NonValueTensorLiteralOp
 //===----------------------------------------------------------------------===//
diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
index cb9c484b7..7893c26db 100644
--- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
+++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
@@ -728,7 +728,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::ne.int_list : (int[], int[]) -> (bool)")
     emit("aten::any.bool : (bool[]) -> (bool)", has_folder=True)
     emit("aten::sort.int : (int[], bool) -> ()", has_canonicalizer=True)
-    emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)")
+    emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)", has_folder=True)
     emit("aten::split.Tensor : (Tensor, int, int) -> (Tensor[])")
     emit("aten::split_with_sizes : (Tensor, int[], int) -> (Tensor[])")
     emit("aten::unbind.int : (Tensor, int) -> (Tensor[])")
diff --git a/test/Dialect/Torch/canonicalize.mlir b/test/Dialect/Torch/canonicalize.mlir
index 83055f3be..77a9e8ad3 100644
--- a/test/Dialect/Torch/canonicalize.mlir
+++ b/test/Dialect/Torch/canonicalize.mlir
@@ -2012,6 +2012,35 @@ func.func @torch.aten.sort.int$reverse_true() -> !torch.list<int> {
   return %0 : !torch.list<int>
 }
 
+// CHECK-LABEL: @torch.aten.sort$unary_element
+// CHECK: %[[INDICES:.*]] = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
+// CHECK-NOT: torch.aten.sort %arg
+// CHECK: return %arg0, %[[INDICES]] : !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
+func.func @torch.aten.sort$unary_element(%arg0 : !torch.vtensor<[1],si64>, %arg1 : !torch.int, %arg2 : !torch.bool) -> (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) {
+  %0, %1 = torch.aten.sort %arg0, %arg1, %arg2 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
+  return %0, %1 : !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
+}
+
+
+// CHECK-LABEL: @torch.aten.sort$unary_dim
+// CHECK: %[[INDICES:.*]] = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
+// CHECK-NOT: torch.aten.sort %arg
+// CHECK: return %arg0, %[[INDICES]] : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
+func.func @torch.aten.sort$unary_dim(%arg0 : !torch.vtensor<[3,1,4],si64>, %arg1 : !torch.bool) -> (!torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>) {
+  %dim = torch.constant.int 1
+  %0, %1 = torch.aten.sort %arg0, %dim, %arg1 : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
+  return %0, %1 : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
+}
+
+// CHECK-LABEL: @torch.aten.sort$nofold
+// CHECK: torch.aten.sort %arg
+func.func @torch.aten.sort$nofold(%arg0 : !torch.vtensor<[3,1,4],si64>, %arg1 : !torch.bool) -> (!torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>) {
+  %dim = torch.constant.int 0
+  %0, %1 = torch.aten.sort %arg0, %dim, %arg1 : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>
+  return %0, %1 : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>
+}
+
+
 // CHECK-LABEL: @torch.aten.cat$fold_single_operand
 // CHECK-SAME:  %[[ARG0:.+]]: !torch.tensor
 // CHECK:       return %[[ARG0]] : !torch.tensor