[Torch Dialect] Support aten.cuda and add canonicalizer for aten.cuda (#2231)

Yuanqiang Liu 2023-06-14 09:56:39 +08:00 committed by GitHub
parent 0caaf8d32a
commit 7c6961bcbf
4 changed files with 46 additions and 0 deletions
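By their content, the four hunks below touch the generated op definitions (GeneratedTorchOps.td), the hand-written canonicalization patterns (TorchOps.cpp), the ODS generator script (torch_ods_gen.py), and the canonicalization tests (canonicalize.mlir); the commit page does not show the file paths, so those names are inferred. The op itself corresponds to `Tensor.cuda()` in PyTorch. A minimal sketch of where `aten::cuda` enters a graph, using plain PyTorch (nothing here is from the patch):

import torch

def move_to_gpu(x):
    # Tensor.cuda() is recorded in the TorchScript graph as
    # aten::cuda : (Tensor) -> (Tensor), the op this commit imports.
    return x.cuda()

# Scripting does not execute the function, so no GPU is needed to see the op.
print(torch.jit.script(move_to_gpu).graph)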


@@ -6996,6 +6996,29 @@ def Torch_AtenDetachOp : Torch_Op<"aten.detach", [
  let hasFolder = 1;
}

def Torch_AtenCudaOp : Torch_Op<"aten.cuda", [
    AllowsTypeRefinement,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::cuda : (Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenCudaOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 1, 1);
    }
    void AtenCudaOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 1, 1);
    }
  }];
  let hasCanonicalizer = 1;
}

def Torch_AtenEmbeddingOp : Torch_Op<"aten.embedding", [
    AllowsTypeRefinement,
    HasValueSemantics,
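Note the trait list in this hunk: like aten.detach, the new op carries AllowsTypeRefinement and ReadOnly but not HasValueSemantics, presumably because aten::cuda returns its input unchanged when the tensor already lives on the device and can therefore alias rather than produce a fresh value.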


@@ -2433,6 +2433,20 @@ void PrimDeviceOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
  });
}

//===----------------------------------------------------------------------===//
// AtenCudaOp
//===----------------------------------------------------------------------===//

void AtenCudaOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
                                             MLIRContext *context) {
  patterns.add(+[](AtenCudaOp op, PatternRewriter &rewriter) {
    // Device information isn't relevant to torch-mlir
    auto inputTensor = op.getSelf();
    rewriter.replaceOp(op, inputTensor);
    return success();
  });
}

//===----------------------------------------------------------------------===//
// AtenIntTensorOp
//===----------------------------------------------------------------------===//
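The pattern above simply forwards the operand, erasing the device transfer from the IR. End to end, that means a module using `.cuda()` can now be imported and the op folds away during canonicalization. A hedged sketch using the torch_mlir.compile entry point of this snapshot (the module name and shapes are illustrative, and whether a residual copy op remains depends on the rest of the pipeline):

import torch
import torch_mlir

class MoveToCuda(torch.nn.Module):
    def forward(self, x):
        return x.cuda()

# output_type="torch" stops after the torch-dialect pipeline; with the
# canonicalizer added here, torch.aten.cuda should no longer appear in
# the printed module.
compiled = torch_mlir.compile(MoveToCuda(), torch.ones(3, 4), output_type="torch")
print(compiled)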


@@ -484,6 +484,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
    emit_with_mutating_variants("aten::copy : (Tensor, Tensor, bool) -> (Tensor)")
    emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)")
    emit("aten::detach : (Tensor) -> (Tensor)", has_folder=True)
    emit("aten::cuda : (Tensor) -> (Tensor)", has_canonicalizer=True)
    emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)")
    emit("aten::embedding_bag.padding_idx : (Tensor, Tensor, Tensor, bool, int, bool, Tensor?, bool, int?) -> (Tensor, Tensor, Tensor, Tensor)")
    emit("aten::_embedding_bag : (Tensor, Tensor, Tensor, bool, int, bool, Tensor?, bool, int) -> (Tensor, Tensor, Tensor, Tensor)")


@@ -1993,3 +1993,11 @@ func.func @torch.prims.view_of$fold(%arg0: !torch.vtensor<[3,4,2],f32>) -> !torc
  %0 = torch.prims.view_of %arg0 : !torch.vtensor<[3,4,2],f32> -> !torch.vtensor<[3,4,2],f32>
  return %0 : !torch.vtensor<[3,4,2],f32>
}

// CHECK-LABEL:   func.func @torch.aten.cuda$canonicalize
// CHECK-SAME:        %[[ARG:.*]]: !torch.tensor
// CHECK-NEXT:        return %[[ARG]] : !torch.tensor
func.func @torch.aten.cuda$canonicalize(%arg0: !torch.tensor) -> !torch.tensor {
  %0 = torch.aten.cuda %arg0 : !torch.tensor -> !torch.tensor
  return %0 : !torch.tensor
}
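The new test follows the pattern of the existing fold tests in this file: when the file's RUN line pushes it through torch-mlir-opt's -canonicalize pass, the torch.aten.cuda op disappears and the function returns its argument directly, which is exactly what the CHECK-NEXT line asserts.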