AtenSortOp Folder (#2864)

Adds a folder for `aten.sort`: when the dimension being sorted has size 1 (or every dimension does), sorting is a no-op, so the values result folds to the input and the indices result folds to an all-zero literal. Split off from:

https://github.com/llvm/torch-mlir/pull/2856
https://github.com/llvm/torch-mlir/pull/2860

---------

Co-authored-by: Xida Ren <xida.ren.dev@gmail.com>
Co-authored-by: Rob Suderman <rob.suderman@gmail.com>
Xida Ren (Cedar), 2024-02-06 16:12:12 -05:00, committed by GitHub
parent faf7d4aaa5
commit cc06391630
4 changed files with 77 additions and 1 deletion

include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td

@@ -12559,6 +12559,7 @@ def Torch_AtenSortOp : Torch_Op<"aten.sort", [
      printDefaultTorchOp(printer, *this, 3, 2);
    }
  }];
  let hasFolder = 1;
}
def Torch_AtenSplitTensorOp : Torch_Op<"aten.split.Tensor", [
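For context on the generated line above: `let hasFolder = 1;` tells ODS to declare a fold hook on the op. Because `aten.sort` has two results, the hook takes the multi-result form. A sketch of what ODS declares (namespace qualifiers elided), which TorchOps.cpp implements in the next hunk:

// Declared on Torch_AtenSortOp when hasFolder is set (sketch).
// Appending to `results` replaces the op's results; returning
// failure() leaves the op untouched.
LogicalResult fold(FoldAdaptor adaptor,
                   SmallVectorImpl<OpFoldResult> &results);

GeneratedTorchOps.td is generated, so the durable change is the `has_folder=True` edit to torch_ods_gen.py further down.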

lib/Dialect/Torch/IR/TorchOps.cpp

@@ -1710,6 +1710,52 @@ void AtenSortIntOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
  });
}

//===----------------------------------------------------------------------===//
// AtenSortOp
//===----------------------------------------------------------------------===//

LogicalResult AtenSortOp::fold(FoldAdaptor adaptor,
                               SmallVectorImpl<OpFoldResult> &results) {
  auto operand = getSelf();
  auto operandType = dyn_cast<BaseTensorType>(operand.getType());
  if (!operandType || !operandType.hasSizes())
    return failure();

  // Only ValueTensorType has toBuiltinTensor.
  auto indicesTensorType = dyn_cast<ValueTensorType>(getResult(1).getType());
  if (!indicesTensorType || !indicesTensorType.hasDtype())
    return failure();
  auto indicesType =
      indicesTensorType.toBuiltinTensor().clone(indicesTensorType.getDtype());
  if (!indicesType || !indicesType.hasStaticShape())
    return failure();

  // The dim operand must be a known constant, normalized to be in range.
  IntegerAttr dimAttribute = dyn_cast_if_present<IntegerAttr>(adaptor.getDim());
  if (!dimAttribute)
    return failure();
  int64_t rank = operandType.getSizes().size();
  int64_t dimInt = dimAttribute.getValue().getSExtValue();
  if (dimInt < 0)
    dimInt += rank;
  if (dimInt < 0 || dimInt >= rank)
    return failure();
  bool unaryDim = operandType.getSizes()[dimInt] == 1;

  // Sorting a size-1 dimension (or a tensor whose dims are all 1) is a
  // no-op: the values fold to the input and the indices fold to zeros.
  if (unaryDim || llvm::all_of(operandType.getSizes(),
                               [](int64_t dim) { return dim == 1; })) {
    OpBuilder builder(getContext());
    results.push_back(operand);
    results.push_back(DenseElementsAttr::get(
        indicesType, builder.getZeroAttr(indicesType.getElementType())));
    return success();
  }
  return failure();
}

//===----------------------------------------------------------------------===//
// NonValueTensorLiteralOp
//===----------------------------------------------------------------------===//
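Note that nothing calls `fold` directly here: it runs whenever the canonicalizer (or another folding-aware rewrite driver) visits an `aten.sort` op, which is how the canonicalize.mlir tests below exercise it. A minimal sketch of driving it programmatically, assuming a parsed module with the Torch dialect registered (the helper name is hypothetical):

#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Runs the canonicalizer, which invokes registered folders such as
// AtenSortOp::fold on every op it visits.
LogicalResult runCanonicalizer(ModuleOp module) {
  PassManager pm(module.getContext());
  pm.addPass(createCanonicalizerPass());
  return pm.run(module);
}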

projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py

@@ -728,7 +728,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
emit("aten::ne.int_list : (int[], int[]) -> (bool)")
emit("aten::any.bool : (bool[]) -> (bool)", has_folder=True)
emit("aten::sort.int : (int[], bool) -> ()", has_canonicalizer=True)
emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)")
emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)", has_folder=True)
emit("aten::split.Tensor : (Tensor, int, int) -> (Tensor[])")
emit("aten::split_with_sizes : (Tensor, int[], int) -> (Tensor[])")
emit("aten::unbind.int : (Tensor, int) -> (Tensor[])")

test/Dialect/Torch/canonicalize.mlir

@@ -2012,6 +2012,35 @@ func.func @torch.aten.sort.int$reverse_true() -> !torch.list<int> {
  return %0 : !torch.list<int>
}

// CHECK-LABEL: @torch.aten.sort$unary_element
// CHECK: %[[INDICES:.*]] = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
// CHECK-NOT: torch.aten.sort %arg
// CHECK: return %arg0, %[[INDICES]] : !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
func.func @torch.aten.sort$unary_element(%arg0 : !torch.vtensor<[1],si64>, %arg1 : !torch.int, %arg2 : !torch.bool) -> (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) {
  %0, %1 = torch.aten.sort %arg0, %arg1, %arg2 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
  return %0, %1 : !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
}

// CHECK-LABEL: @torch.aten.sort$unary_dim
// CHECK: %[[INDICES:.*]] = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
// CHECK-NOT: torch.aten.sort %arg
// CHECK: return %arg0, %[[INDICES]] : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
func.func @torch.aten.sort$unary_dim(%arg0 : !torch.vtensor<[3,1,4],si64>, %arg1 : !torch.bool) -> (!torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>) {
  %dim = torch.constant.int 1
  %0, %1 = torch.aten.sort %arg0, %dim, %arg1 : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
  return %0, %1 : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
}

// CHECK-LABEL: @torch.aten.sort$nofold
// CHECK: torch.aten.sort %arg
func.func @torch.aten.sort$nofold(%arg0 : !torch.vtensor<[3,1,4],si64>, %arg1 : !torch.bool) -> (!torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>) {
  %dim = torch.constant.int 0
  %0, %1 = torch.aten.sort %arg0, %dim, %arg1 : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>
  return %0, %1 : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>
}

// CHECK-LABEL: @torch.aten.cat$fold_single_operand
// CHECK-SAME: %[[ARG0:.+]]: !torch.tensor
// CHECK: return %[[ARG0]] : !torch.tensor