mirror of https://github.com/llvm/torch-mlir
Add Add and Sub scalar op conversions.
`aten.add.Scalar` and `aten.sub.Scalar` op conversions have been added as part of the `-convert-torch-to-linalg` pass.
parent 3cb46cecef, commit 9e1ecf2c0b
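For context, the scalar variants carry the same `alpha` operand as `aten.add.Tensor`/`aten.sub.Tensor`, so the lowering has to compute `self + alpha * other` and `self - alpha * other` respectively. A minimal eager-mode PyTorch sketch of that semantics (illustrative only, not part of this patch):

import torch

x = torch.arange(6, dtype=torch.float32).reshape(2, 3)

# torch.add/sub with a scalar `other` scale `other` by `alpha` first.
assert torch.allclose(torch.add(x, 3.0, alpha=2), x + 2 * 3.0)
assert torch.allclose(torch.sub(x, 2.1, alpha=2), x - 2 * 2.1)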
@@ -1000,3 +1000,69 @@ def ElementwiseAndIntegerModule_basic(module, tu: TestUtils):
                   torch.randint(-10, 10, (3, 4)))


class ElementwiseSubScalarIntModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1], torch.int64, True),
    ])
    def forward(self, x):
        return torch.sub(x, 2.1, alpha=2)


@register_test_case(module_factory=lambda: ElementwiseSubScalarIntModule())
def ElementwiseSubScalarIntModule_basic(module, tu: TestUtils):
    module.forward(torch.randint(10, (3, 4)))


class ElementwiseSubScalarFloatModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1], torch.float32, True),
    ])
    def forward(self, x):
        return torch.sub(x, 2.1)


@register_test_case(module_factory=lambda: ElementwiseSubScalarFloatModule())
def ElementwiseSubScalarFloatModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(3, 4))


class ElementwiseAddScalarIntModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1], torch.int64, True),
    ])
    def forward(self, x):
        return torch.add(x, 3.0)


@register_test_case(module_factory=lambda: ElementwiseAddScalarIntModule())
def ElementwiseAddScalarIntModule_basic(module, tu: TestUtils):
    module.forward(torch.randint(10, (3, 4)))


class ElementwiseAddScalarFloatModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1], torch.float32, True),
    ])
    def forward(self, x):
        return torch.add(x, 3.0, alpha=2)


@register_test_case(module_factory=lambda: ElementwiseAddScalarFloatModule())
def ElementwiseAddScalarFloatModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(3, 4))
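Note that `ElementwiseSubScalarIntModule` feeds an `int64` tensor but subtracts the float scalar `2.1`, so eager PyTorch promotes the result to a floating-point dtype; the lowering below accounts for this by converting `self`, `other`, and `alpha` to the converted result element type before emitting the arith ops. A small eager-mode check of that promotion (illustrative only, not part of this patch):

import torch

x = torch.randint(10, (3, 4))   # int64, as in the test above
y = torch.sub(x, 2.1, alpha=2)  # float scalar forces promotion
print(x.dtype, y.dtype)         # torch.int64 torch.float32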
@@ -1676,6 +1676,42 @@ static Value createLinalgPayloadCalculationForElementwiseOp(
      return b.create<arith::SubIOp>(loc, lhs, scaled);
    }
  }
  if (auto subScalar = dyn_cast<AtenSubScalarOp>(op)) {
    Type dtype = converter->convertType(subScalar.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
    Value alpha = convertScalarToDtype(b, loc, operands[2], dtype);
    if (dtype.isa<mlir::FloatType>()) {
      Value mult = b.create<arith::MulFOp>(loc, other, alpha);
      return b.create<arith::SubFOp>(loc, self, mult);
    } else if (dtype.isa<mlir::IntegerType>()) {
      Value mult = b.create<arith::MulIOp>(loc, other, alpha);
      return b.create<arith::SubIOp>(loc, self, mult);
    }
    subScalar.emitError("unimplemented: dtype other than float and integer "
                        "types are not supported.");
    return nullptr;
  }
  if (auto addScalar = dyn_cast<AtenAddScalarOp>(op)) {
    Type dtype = converter->convertType(addScalar.getType())
                     .cast<RankedTensorType>()
                     .getElementType();
    Value self = convertScalarToDtype(b, loc, payloadArgs[0], dtype);
    Value other = convertScalarToDtype(b, loc, operands[1], dtype);
    Value alpha = convertScalarToDtype(b, loc, operands[2], dtype);
    if (dtype.isa<mlir::FloatType>()) {
      Value mult = b.create<arith::MulFOp>(loc, other, alpha);
      return b.create<arith::AddFOp>(loc, self, mult);
    } else if (dtype.isa<mlir::IntegerType>()) {
      Value mult = b.create<arith::MulIOp>(loc, other, alpha);
      return b.create<arith::AddIOp>(loc, self, mult);
    }
    addScalar.emitError("unimplemented: dtype other than float and integer "
                        "types are not supported.");
    return nullptr;
  }
  if (auto mul = dyn_cast<AtenMulTensorOp>(op)) {
    AtenMulTensorOp::Adaptor adaptor(operands);
    Type dtype = converter->convertType(mul.getType())
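As a reading aid, both payloads above compute `self - other * alpha` and `self + other * alpha` per element once all three values have been cast to the result element type; only the arith op flavor (float vs. integer) differs. A hypothetical Python reference model of the payload (names are illustrative, not from the patch):

def sub_scalar_payload(self_elem, other, alpha):
    # mirrors MulFOp/MulIOp followed by SubFOp/SubIOp
    return self_elem - other * alpha

def add_scalar_payload(self_elem, other, alpha):
    # mirrors MulFOp/MulIOp followed by AddFOp/AddIOp
    return self_elem + other * alpha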
@@ -2244,7 +2280,8 @@ struct ConvertElementwiseOp : ConversionPattern {
              AtenRsqrtOp, AtenDivScalarOp, AtenAbsOp, AtenReciprocalOp,
              AtenBitwiseAndTensorOp, AtenGtScalarOp, AtenEqScalarOp,
              AtenLtScalarOp, AtenWhereSelfOp, AtenCeilOp, AtenGtTensorOp,
-             AtenEqTensorOp, AtenLtTensorOp>(op))
+             AtenEqTensorOp, AtenLtTensorOp, AtenSubScalarOp, AtenAddScalarOp>(
+                 op))
       return rewriter.notifyMatchFailure(op, "not a supported elementwise op");

     if (failed(verifyLinalgCompatibleTypes(op, rewriter)))