mirror of https://github.com/llvm/torch-mlir
[torch] Add `torch.aten.view.dtype` to op list (#3664)
Support dtype conversion between types. This is useful for bitcasting buffers between differing bit depths.
parent 3e93cad09c
commit 52f54505ce
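For context, a minimal illustration (not part of the patch) of the eager-mode behavior `aten::view.dtype` models: `Tensor.view(dtype)` reinterprets the existing buffer under a new element type without copying.

```python
import torch

# Illustration of the semantics `aten::view.dtype` captures (same element width here).
x = torch.tensor([1.0, -2.0], dtype=torch.float32)
bits = x.view(torch.int32)   # bitcast: same storage, int32 element type
print(bits)                  # tensor([ 1065353216, -1073741824], dtype=torch.int32)
assert torch.equal(bits.view(torch.float32), x)  # round-trips exactly
```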
@@ -8302,6 +8302,54 @@ def Torch_Aten__Or__TensorOp : Torch_Op<"aten.__or__.Tensor", [
   let hasCanonicalizer = 1;
 }
 
+def Torch_Aten__Lshift__ScalarOp : Torch_Op<"aten.__lshift__.Scalar", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::__lshift__.Scalar : (Tensor, Scalar) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    AnyTorchScalarType:$other
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult Aten__Lshift__ScalarOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 2, 1);
+    }
+    void Aten__Lshift__ScalarOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 2, 1);
+    }
+  }];
+}
+
+def Torch_Aten__Rshift__ScalarOp : Torch_Op<"aten.__rshift__.Scalar", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::__rshift__.Scalar : (Tensor, Scalar) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    AnyTorchScalarType:$other
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult Aten__Rshift__ScalarOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 2, 1);
+    }
+    void Aten__Rshift__ScalarOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 2, 1);
+    }
+  }];
+}
+
 def Torch_Aten_SoftmaxOp : Torch_Op<"aten._softmax", [
     AllowsTypeRefinement,
     HasValueSemantics,
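For reference, a small eager-mode sketch (illustrative, not part of the patch) of the scalar shift overloads the two generated ops above correspond to:

```python
import torch

# Tensor << int and Tensor >> int dispatch to the *.Scalar shift overloads.
x = torch.tensor([1, 2, 4], dtype=torch.int32)
print(x << 2)   # tensor([ 4,  8, 16], dtype=torch.int32)
print(x >> 1)   # tensor([0, 1, 2], dtype=torch.int32)
```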
@@ -12014,6 +12062,29 @@ def Torch_AtenViewOp : Torch_Op<"aten.view", [
   let hasFolder = 1;
 }
 
+def Torch_AtenViewDtypeOp : Torch_Op<"aten.view.dtype", [
+    AllowsTypeRefinement,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::view.dtype : (Tensor, int) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    Torch_IntType:$dtype
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenViewDtypeOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 2, 1);
+    }
+    void AtenViewDtypeOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 2, 1);
+    }
+  }];
+}
+
 def Torch_Aten_UnsafeViewOp : Torch_Op<"aten._unsafe_view", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -845,6 +845,22 @@ static Value createLinalgPayloadCalculationForElementwiseOp(
       return b.create<arith::SubIOp>(loc, lhs, scaled);
     }
   }
+  if (auto lshiftScalar = dyn_cast<Aten__Lshift__ScalarOp>(op)) {
+    Type dtype =
+        cast<RankedTensorType>(converter->convertType(lshiftScalar.getType()))
+            .getElementType();
+    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
+    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
+    return b.create<arith::ShLIOp>(loc, self, other);
+  }
+  if (auto rshiftScalar = dyn_cast<Aten__Rshift__ScalarOp>(op)) {
+    Type dtype =
+        cast<RankedTensorType>(converter->convertType(rshiftScalar.getType()))
+            .getElementType();
+    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
+    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
+    return b.create<arith::ShRUIOp>(loc, self, other);
+  }
   if (auto subScalar = dyn_cast<AtenSubScalarOp>(op)) {
     Type dtype =
         cast<RankedTensorType>(converter->convertType(subScalar.getType()))
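A minimal sketch of the per-element arithmetic these payload cases lower to, assuming i32 operands; the helper names below are illustrative and not part of the patch:

```python
MASK32 = 0xFFFFFFFF

def to_signed32(v: int) -> int:
    """Reinterpret a 32-bit pattern as a signed value."""
    return v - (1 << 32) if v & 0x80000000 else v

def shli_i32(lhs: int, rhs: int) -> int:
    """arith.shli on i32: left shift, result wraps to 32 bits."""
    return to_signed32((lhs << rhs) & MASK32)

def shrui_i32(lhs: int, rhs: int) -> int:
    """arith.shrui on i32: logical (zero-filling) right shift of the bit pattern."""
    return to_signed32((lhs & MASK32) >> rhs)

assert shli_i32(1, 4) == 16
assert shrui_i32(-8, 1) == 0x7FFFFFFC  # the sign bit is shifted in as data, not extended
```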
@@ -691,6 +691,8 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
     emit("aten::__and__.Scalar : (Tensor, Scalar) -> (Tensor)", has_canonicalizer=True)
     emit("aten::__or__.Tensor : (Tensor, Tensor) -> (Tensor)", has_canonicalizer=True)
+    emit("aten::__lshift__.Scalar : (Tensor, Scalar) -> (Tensor)")
+    emit("aten::__rshift__.Scalar : (Tensor, Scalar) -> (Tensor)")
     emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
     emit("aten::mean : (Tensor, int?) -> (Tensor)")
     emit("aten::std : (Tensor, bool) -> (Tensor)")
@@ -886,6 +888,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::_cast_Long : (Tensor, bool) -> (Tensor)", has_canonicalizer=True)
     emit("aten::type_as : (Tensor, Tensor) -> (Tensor)")
     emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True)
+    emit("aten::view.dtype : (Tensor, int) -> (Tensor)")
     emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)")
     emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)", has_folder=True)
     emit(