[torch] Add `torch.aten.view.dtype` to op list (#3664)

Support dtype reinterpretation of tensor storage. This is useful for
bitcasting buffers between types of differing bit depths.
pull/3665/head
Rob Suderman 2024-08-23 19:02:53 -07:00 committed by GitHub
parent 9a6fe58a02
commit 9a4c8c606c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 90 additions and 0 deletions

View File

@ -8273,6 +8273,54 @@ def Torch_Aten__Or__TensorOp : Torch_Op<"aten.__or__.Tensor", [
let hasCanonicalizer = 1;
}
// Elementwise left shift of an integer tensor by a scalar amount.
// NOTE: this def mirrors the output of the ODS generator (torch_ods_gen.py);
// regenerate rather than hand-editing the op structure.
def Torch_Aten__Lshift__ScalarOp : Torch_Op<"aten.__lshift__.Scalar", [
    AllowsTypeRefinement,
    HasValueSemantics,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::__lshift__.Scalar : (Tensor, Scalar) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchScalarType:$other
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  // Parse/print via the shared default Torch-op format (2 operands, 1 result).
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult Aten__Lshift__ScalarOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 2, 1);
    }
    void Aten__Lshift__ScalarOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 2, 1);
    }
  }];
}
// Elementwise right shift of an integer tensor by a scalar amount.
// NOTE: this def mirrors the output of the ODS generator (torch_ods_gen.py);
// regenerate rather than hand-editing the op structure.
def Torch_Aten__Rshift__ScalarOp : Torch_Op<"aten.__rshift__.Scalar", [
    AllowsTypeRefinement,
    HasValueSemantics,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::__rshift__.Scalar : (Tensor, Scalar) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchScalarType:$other
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  // Parse/print via the shared default Torch-op format (2 operands, 1 result).
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult Aten__Rshift__ScalarOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 2, 1);
    }
    void Aten__Rshift__ScalarOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 2, 1);
    }
  }];
}
def Torch_Aten_SoftmaxOp : Torch_Op<"aten._softmax", [
AllowsTypeRefinement,
HasValueSemantics,
@ -11958,6 +12006,29 @@ def Torch_AtenViewOp : Torch_Op<"aten.view", [
let hasFolder = 1;
}
// Reinterprets a tensor's storage as a different dtype (bitcast-style view).
// NOTE(review): unlike the shift ops above, this def omits HasValueSemantics —
// presumably because `view.dtype` returns a view aliasing the input's storage;
// confirm against the trait assignment in torch_ods_gen.py.
// This def mirrors generator output; regenerate rather than hand-editing.
def Torch_AtenViewDtypeOp : Torch_Op<"aten.view.dtype", [
    AllowsTypeRefinement,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::view.dtype : (Tensor, int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    Torch_IntType:$dtype
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  // Parse/print via the shared default Torch-op format (2 operands, 1 result).
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenViewDtypeOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 2, 1);
    }
    void AtenViewDtypeOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 2, 1);
    }
  }];
}
def Torch_Aten_UnsafeViewOp : Torch_Op<"aten._unsafe_view", [
AllowsTypeRefinement,
HasValueSemantics,

View File

@ -845,6 +845,22 @@ static Value createLinalgPayloadCalculationForElementwiseOp(
return b.create<arith::SubIOp>(loc, lhs, scaled);
}
}
if (auto lshiftScalar = dyn_cast<Aten__Lshift__ScalarOp>(op)) {
  // Lower `aten.__lshift__.Scalar` to an elementwise `arith.shli`.
  // Convert both the tensor element and the scalar operand to the result
  // element type so the shift operates on matching widths.
  Type dtype =
      cast<RankedTensorType>(converter->convertType(lshiftScalar.getType()))
          .getElementType();
  Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
  Value other = convertScalarToDtype(b, loc, operands[1], dtype);
  // Left shift is the same for signed and unsigned values, so `shli`
  // is correct for any integer dtype.
  return b.create<arith::ShLIOp>(loc, self, other);
}
if (auto rshiftScalar = dyn_cast<Aten__Rshift__ScalarOp>(op)) {
  // Lower `aten.__rshift__.Scalar` to an elementwise right shift.
  // Convert both the tensor element and the scalar operand to the result
  // element type so the shift operates on matching widths.
  Type dtype =
      cast<RankedTensorType>(converter->convertType(rshiftScalar.getType()))
          .getElementType();
  Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
  Value other = convertScalarToDtype(b, loc, operands[1], dtype);
  // PyTorch's `>>` on signed integer tensors is an arithmetic (sign
  // extending) shift; only unsigned dtypes use a logical shift. The linalg
  // element type is signless, so consult the torch-level type of `self`.
  if (isUnsignedTorchType(rshiftScalar.getSelf().getType()))
    return b.create<arith::ShRUIOp>(loc, self, other);
  return b.create<arith::ShRSIOp>(loc, self, other);
}
if (auto subScalar = dyn_cast<AtenSubScalarOp>(op)) {
Type dtype =
cast<RankedTensorType>(converter->convertType(subScalar.getType()))

View File

@ -688,6 +688,8 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
emit("aten::__and__.Scalar : (Tensor, Scalar) -> (Tensor)", has_canonicalizer=True)
emit("aten::__or__.Tensor : (Tensor, Tensor) -> (Tensor)", has_canonicalizer=True)
emit("aten::__lshift__.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::__rshift__.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
emit("aten::mean : (Tensor, int?) -> (Tensor)")
emit("aten::std : (Tensor, bool) -> (Tensor)")
@ -880,6 +882,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
emit("aten::_cast_Long : (Tensor, bool) -> (Tensor)", has_canonicalizer=True)
emit("aten::type_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True)
emit("aten::view.dtype : (Tensor, int) -> (Tensor)")
emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)")
emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)", has_folder=True)
emit(