mirror of https://github.com/llvm/torch-mlir
Add aten.slice.Tensor & aten.cat folders (#1691)
parent 731c313231
commit b1f6832849
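Summary (from the hunks below): aten.cat now folds to its only operand when its list argument is a single-use prim.ListConstruct containing exactly one tensor, and aten.slice.Tensor folds to its input when both value-tensor types carry fully known, identical static shapes, i.e. the slice covers the whole domain. The change touches the generated ODS definitions, the C++ fold implementations, the ODS generator entries, and the canonicalization tests; the hunks appear to come from GeneratedTorchOps.td, TorchOps.cpp, torch_ods_gen.py, and canonicalize.mlir, respectively.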
@@ -7194,6 +7194,7 @@ def Torch_AtenSliceTensorOp : Torch_Op<"aten.slice.Tensor", [
       printDefaultTorchOp(printer, *this, 5, 1);
     }
   }];
+  let hasFolder = 1;
 }

 def Torch_AtenLenTensorOp : Torch_Op<"aten.len.Tensor", [
@@ -8378,6 +8379,7 @@ def Torch_AtenCatOp : Torch_Op<"aten.cat", [
       printDefaultTorchOp(printer, *this, 2, 1);
     }
   }];
+  let hasFolder = 1;
 }

 def Torch_AtenAppendTOp : Torch_Op<"aten.append.t", [
@@ -2084,6 +2084,36 @@ OpFoldResult AtenSubIntOp::fold(ArrayRef<Attribute> operands) {
       operands, [](int64_t a, int64_t b) { return a - b; });
 }

+//===----------------------------------------------------------------------===//
+// AtenCatOp
+//===----------------------------------------------------------------------===//
+
+OpFoldResult AtenCatOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
+  auto list = getOperand(0).getDefiningOp<PrimListConstructOp>();
+  if (!list || !list->hasOneUse() || list.getElements().size() != 1)
+    return nullptr;
+  return list.getElements()[0];
+}
+
+//===----------------------------------------------------------------------===//
+// AtenSliceTensorOp
+//===----------------------------------------------------------------------===//
+
+OpFoldResult AtenSliceTensorOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
+  auto inType = getOperand(0).getType().dyn_cast<ValueTensorType>();
+  auto outType = getResult().getType().dyn_cast<ValueTensorType>();
+  if (!inType || !outType || !inType.hasSizes() || !outType.hasSizes())
+    return nullptr;
+  if (inType.getSizes().size() != outType.getSizes().size() ||
+      !inType.areAllSizesKnown() || !outType.areAllSizesKnown())
+    return nullptr;
+  for (size_t i = 0; i < inType.getSizes().size(); ++i) {
+    if (inType.getSizes()[i] != outType.getSizes()[i])
+      return nullptr;
+  }
+  return getOperand(0);
+}
+
 //===----------------------------------------------------------------------===//
 // AtenMulIntOp
 //===----------------------------------------------------------------------===//
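For contrast with the positive tests added at the end of this diff, here is a small sketch (not part of the commit; the function names are made up) of IR that these folders intentionally leave alone, following the guards in the code above: a cat whose list has two elements fails the single-element check, and a slice whose annotated result shape differs from its input shape fails the static-shape comparison.

// Hypothetical: the list has two elements, so AtenCatOp::fold returns nullptr and the cat stays.
func.func @cat_two_operands(%arg0: !torch.tensor, %arg1: !torch.tensor) -> !torch.tensor {
  %int0 = torch.constant.int 0
  %0 = torch.prim.ListConstruct %arg0, %arg1 : (!torch.tensor, !torch.tensor) -> !torch.list<tensor>
  %1 = torch.aten.cat %0, %int0 : !torch.list<tensor>, !torch.int -> !torch.tensor
  return %1 : !torch.tensor
}

// Hypothetical: static shapes differ ([4] vs. [2]), so AtenSliceTensorOp::fold returns nullptr.
func.func @slice_partial(%arg0: !torch.vtensor<[4],f32>) -> !torch.vtensor<[2],f32> {
  %int0 = torch.constant.int 0
  %int1 = torch.constant.int 1
  %int2 = torch.constant.int 2
  %0 = torch.aten.slice.Tensor %arg0, %int0, %int0, %int2, %int1 : !torch.vtensor<[4],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],f32>
  return %0 : !torch.vtensor<[2],f32>
}

Note that the slice folder only compares static input and result shapes rather than interpreting start/end/step, so it conservatively keeps any slice whose result type is not provably identical to its input.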
@@ -494,7 +494,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
     emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
     emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
-    emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
+    emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)", has_folder=True)
     emit("aten::len.Tensor : (Tensor) -> (int)")
     emit("aten::cpu : (Tensor) -> (Tensor)")
     emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")
@@ -547,7 +547,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()")

     # List ops.
-    emit("aten::cat : (Tensor[], int) -> (Tensor)")
+    emit("aten::cat : (Tensor[], int) -> (Tensor)", has_folder=True)
     emit("aten::append.t : (t[], t) -> (t[])")
     emit("aten::add.t : (t[], t[]) -> (t[])", has_canonicalizer=True)
     emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True)
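(The has_folder=True flag passed to emit here is the ODS-generator switch that produces the "let hasFolder = 1;" lines in the GeneratedTorchOps.td hunks earlier in this diff, so the TableGen and Python changes describe the same edit from two sides.)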
@@ -1770,3 +1770,24 @@ func.func @torch.aten.sort.int$reverse_true() -> !torch.list<int> {
   torch.aten.sort.int %0, %true : !torch.list<int>, !torch.bool
   return %0 : !torch.list<int>
 }
+
+// CHECK-LABEL: @torch.aten.cat$fold_single_operand
+// CHECK-SAME:    %[[ARG0:.+]]: !torch.tensor
+// CHECK:         return %[[ARG0]] : !torch.tensor
+func.func @torch.aten.cat$fold_single_operand(%arg0: !torch.tensor) -> !torch.tensor {
+  %int1 = torch.constant.int 1
+  %0 = torch.prim.ListConstruct %arg0 : (!torch.tensor) -> !torch.list<tensor>
+  %1 = torch.aten.cat %0, %int1 : !torch.list<tensor>, !torch.int -> !torch.tensor
+  return %1 : !torch.tensor
+}
+
+// CHECK-LABEL: @torch.aten.slice.tensor$fold_full_domain_slice
+// CHECK-SAME:    %[[ARG0:.+]]: !torch.vtensor<[4],f32>
+// CHECK:         return %[[ARG0]] : !torch.vtensor<[4],f32>
+func.func @torch.aten.slice.tensor$fold_full_domain_slice(%arg0: !torch.vtensor<[4],f32>) -> !torch.vtensor<[4],f32> {
+  %int1 = torch.constant.int 1
+  %int-1 = torch.constant.int -1
+  %int0 = torch.constant.int 0
+  %0 = torch.aten.slice.Tensor %arg0, %int0, %int0, %int-1, %int1 : !torch.vtensor<[4], f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[4], f32>
+  return %0 : !torch.vtensor<[4],f32>
+}
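The two new FileCheck cases assert that canonicalization reduces each op to a plain return of its argument. They rely on the test file's existing RUN line, which is not shown in this hunk; in torch-mlir's canonicalize.mlir that is normally torch-mlir-opt with the -canonicalize pass piped into FileCheck.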