From a74a98094b88d2e8b4d532ffe711f3fa94a56ccb Mon Sep 17 00:00:00 2001 From: Stella Laurenzo Date: Wed, 16 Sep 2020 16:21:24 -0700 Subject: [PATCH] Add a new python script to auto-generate ATen op ODS definitions. (#43) * Add a new python script to auto-generate ATen op ODS definitions. * There is still some work on some of the ops to annotate correct types. * The ODS is not actually included into the dialect yet, but I'd like to commit it so that we can track changes. * Will reconcile this with the ops produced by the existing script in a followup. Still need to do some more iteration to reach parity. --- .../Dialect/ATen/GeneratedATenBuiltinOps.td | 2023 +++++++++++++++++ python/npcomp/torch/opdefs/generate_ods.py | 199 ++ python/npcomp/torch/opdefs/registry.py | 44 +- 3 files changed, 2260 insertions(+), 6 deletions(-) create mode 100644 include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td create mode 100644 python/npcomp/torch/opdefs/generate_ods.py diff --git a/include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td b/include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td new file mode 100644 index 000000000..927d65f97 --- /dev/null +++ b/include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td @@ -0,0 +1,2023 @@ +//===-------------------------------------------------------*- tablegen -*-===// +// +// This file is licensed under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Operation summaries and descriptions were systematically derived from public +// API docstrings and are licensed accordingly: +// https://github.com/pytorch/pytorch/blob/master/LICENSE +//===----------------------------------------------------------------------===// +// This file is automatically generated. Please do not edit. 
+// Generated via: +// python -m npcomp.torch.opdefs.generate_ods +//===----------------------------------------------------------------------===// + +def ATen_AbsOp: ATen_ImmutableTensorOp<"abs", [NoSideEffect]> { + let summary = "abs(input, *, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the absolute value of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = |\text{input}_{i}| + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.abs(torch.tensor([-1, -2, 3])) + tensor([ 1, 2, 3]) + }]; +} + +def ATen_AbsInplaceOp: ATen_RefTensorOp<"abs.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AcosOp: ATen_ImmutableTensorOp<"acos", [NoSideEffect]> { + let summary = "acos(input, *, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the inverse cosine of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = \cos^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.3348, -0.5889, 0.2005, -0.1584]) + >>> torch.acos(a) + tensor([ 1.2294, 2.2004, 1.3690, 1.7298]) + }]; +} + +def ATen_AcosInplaceOp: ATen_RefTensorOp<"acos.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AngleOp: ATen_ImmutableTensorOp<"angle", [NoSideEffect]> { + let summary = "angle(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the element-wise angle (in radians) of the given :attr:`input` tensor. + + .. math:: + \text{out}_{i} = angle(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159 + tensor([ 135., 135, -45]) + }]; +} + +def ATen_AngleInplaceOp: ATen_RefTensorOp<"angle.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AsinOp: ATen_ImmutableTensorOp<"asin", [NoSideEffect]> { + let summary = "asin(input, *, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the arcsine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sin^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5962, 1.4985, -0.4396, 1.4525]) + >>> torch.asin(a) + tensor([-0.6387, nan, -0.4552, nan]) + }]; +} + +def ATen_AsinInplaceOp: ATen_RefTensorOp<"asin.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AtanOp: ATen_ImmutableTensorOp<"atan", [NoSideEffect]> { + let summary = "atan(input, *, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the arctangent of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \tan^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.2341, 0.2539, -0.6256, -0.6448]) + >>> torch.atan(a) + tensor([ 0.2299, 0.2487, -0.5591, -0.5727]) + }]; +} + +def ATen_AtanInplaceOp: ATen_RefTensorOp<"atan.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_CeilOp: ATen_ImmutableTensorOp<"ceil", [NoSideEffect]> { + let summary = "ceil(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the ceil of the elements of :attr:`input`, + the smallest integer greater than or equal to each element. + + .. math:: + \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil = \left\lfloor \text{input}_{i} \right\rfloor + 1 + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + >>> torch.ceil(a) + tensor([-0., -1., -1., 1.]) + }]; +} + +def ATen_CeilInplaceOp: ATen_RefTensorOp<"ceil.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_ConjOp: ATen_ImmutableTensorOp<"conj", [NoSideEffect]> { + let summary = "conj(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the element-wise conjugate of the given :attr:`input` tensor. + + .. math:: + \text{out}_{i} = conj(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.conj(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + }]; +} + +def ATen_ConjInplaceOp: ATen_RefTensorOp<"conj.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_CosOp: ATen_ImmutableTensorOp<"cos", [NoSideEffect]> { + let summary = "cos(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the cosine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \cos(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 1.4309, 1.2706, -0.8562, 0.9796]) + >>> torch.cos(a) + tensor([ 0.1395, 0.2957, 0.6553, 0.5574]) + }]; +} + +def ATen_CosInplaceOp: ATen_RefTensorOp<"cos.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_CoshOp: ATen_ImmutableTensorOp<"cosh", [NoSideEffect]> { + let summary = "cosh(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the hyperbolic cosine of the elements of + :attr:`input`. + + .. math:: + \text{out}_{i} = \cosh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1632, 1.1835, -0.6979, -0.7325]) + >>> torch.cosh(a) + tensor([ 1.0133, 1.7860, 1.2536, 1.2805]) + }]; +} + +def ATen_CoshInplaceOp: ATen_RefTensorOp<"cosh.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_DigammaOp: ATen_ImmutableTensorOp<"digamma", [NoSideEffect]> { + let summary = "digamma(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the logarithmic derivative of the gamma function on `input`. + + .. 
math:: + \psi(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)} + + Args: + input (Tensor): the tensor to compute the digamma function on + + Example:: + + >>> a = torch.tensor([1, 0.5]) + >>> torch.digamma(a) + tensor([-0.5772, -1.9635]) + }]; +} + +def ATen_DigammaInplaceOp: ATen_RefTensorOp<"digamma.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_ErfOp: ATen_ImmutableTensorOp<"erf", [NoSideEffect]> { + let summary = "erf(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the error function of each element. The error function is defined as follows: + + .. math:: + \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.erf(torch.tensor([0, -1., 10.])) + tensor([ 0.0000, -0.8427, 1.0000]) + }]; +} + +def ATen_ErfInplaceOp: ATen_RefTensorOp<"erf.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_ErfcOp: ATen_ImmutableTensorOp<"erfc", [NoSideEffect]> { + let summary = "erfc(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the complementary error function of each element of :attr:`input`. + The complementary error function is defined as follows: + + .. math:: + \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> torch.erfc(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 1.8427, 0.0000]) + }]; +} + +def ATen_ErfcInplaceOp: ATen_RefTensorOp<"erfc.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_ErfinvOp: ATen_ImmutableTensorOp<"erfinv", [NoSideEffect]> { + let summary = "erfinv(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the inverse error function of each element of :attr:`input`. + The inverse error function is defined in the range :math:`(-1, 1)` as: + + .. math:: + \mathrm{erfinv}(\mathrm{erf}(x)) = x + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.erfinv(torch.tensor([0, 0.5, -1.])) + tensor([ 0.0000, 0.4769, -inf]) + }]; +} + +def ATen_ErfinvInplaceOp: ATen_RefTensorOp<"erfinv.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_ExpOp: ATen_ImmutableTensorOp<"exp", [NoSideEffect]> { + let summary = "exp(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the exponential of the elements + of the input tensor :attr:`input`. + + .. math:: + y_{i} = e^{x_{i}} + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> torch.exp(torch.tensor([0, math.log(2.)])) + tensor([ 1., 2.]) + }]; +} + +def ATen_ExpInplaceOp: ATen_RefTensorOp<"exp.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_Expm1Op: ATen_ImmutableTensorOp<"expm1", [NoSideEffect]> { + let summary = "expm1(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the exponential of the elements minus 1 + of :attr:`input`. + + .. math:: + y_{i} = e^{x_{i}} - 1 + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.expm1(torch.tensor([0, math.log(2.)])) + tensor([ 0., 1.]) + }]; +} + +def ATen_Expm1InplaceOp: ATen_RefTensorOp<"expm1.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_FloorOp: ATen_ImmutableTensorOp<"floor", [NoSideEffect]> { + let summary = "floor(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the floor of the elements of :attr:`input`, + the largest integer less than or equal to each element. + + .. math:: + \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.8166, 1.5308, -0.2530, -0.2091]) + >>> torch.floor(a) + tensor([-1., 1., -1., -1.]) + }]; +} + +def ATen_FloorInplaceOp: ATen_RefTensorOp<"floor.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_FracOp: ATen_ImmutableTensorOp<"frac", [NoSideEffect]> { + let summary = "frac(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the fractional portion of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i}) + + Example:: + + >>> torch.frac(torch.tensor([1, 2.5, -3.2])) + tensor([ 0.0000, 0.5000, -0.2000]) + }]; +} + +def ATen_FracInplaceOp: ATen_RefTensorOp<"frac.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_LgammaOp: ATen_ImmutableTensorOp<"lgamma", [NoSideEffect]> { + let summary = "lgamma(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the logarithm of the gamma function on :attr:`input`. + + .. math:: + \text{out}_{i} = \log \Gamma(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.lgamma(a) + tensor([ 0.5724, 0.0000, -0.1208]) + }]; +} + +def ATen_LgammaInplaceOp: ATen_RefTensorOp<"lgamma.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_LogOp: ATen_ImmutableTensorOp<"log", [NoSideEffect]> { + let summary = "log(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the natural logarithm of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{e} (x_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190]) + >>> torch.log(a) + tensor([ nan, nan, nan, nan, nan]) + }]; +} + +def ATen_LogInplaceOp: ATen_RefTensorOp<"log.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_Log10Op: ATen_ImmutableTensorOp<"log10", [NoSideEffect]> { + let summary = "log10(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the logarithm to the base 10 of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{10} (x_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251]) + + + >>> torch.log10(a) + tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476]) + }]; +} + +def ATen_Log10InplaceOp: ATen_RefTensorOp<"log10.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_Log1pOp: ATen_ImmutableTensorOp<"log1p", [NoSideEffect]> { + let summary = "log1p(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the natural logarithm of (1 + :attr:`input`). + + .. math:: + y_i = \log_{e} (x_i + 1) + + .. note:: This function is more accurate than :func:`torch.log` for small + values of :attr:`input` + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492]) + >>> torch.log1p(a) + tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225]) + }]; +} + +def ATen_Log1pInplaceOp: ATen_RefTensorOp<"log1p.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_Log2Op: ATen_ImmutableTensorOp<"log2", [NoSideEffect]> { + let summary = "log2(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the logarithm to the base 2 of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{2} (x_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490]) + + + >>> torch.log2(a) + tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504]) + }]; +} + +def ATen_Log2InplaceOp: ATen_RefTensorOp<"log2.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_NegOp: ATen_ImmutableTensorOp<"neg", [NoSideEffect]> { + let summary = "neg(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the negative of the elements of :attr:`input`. + + .. math:: + \text{out} = -1 \times \text{input} + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.neg(a) + tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940]) + }]; +} + +def ATen_NegInplaceOp: ATen_RefTensorOp<"neg.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_ReciprocalOp: ATen_ImmutableTensorOp<"reciprocal", [NoSideEffect]> { + let summary = "reciprocal(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the reciprocal of the elements of :attr:`input` + + .. math:: + \text{out}_{i} = \frac{1}{\text{input}_{i}} + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.4595, -2.1219, -1.4314, 0.7298]) + >>> torch.reciprocal(a) + tensor([-2.1763, -0.4713, -0.6986, 1.3702]) + }]; +} + +def ATen_ReciprocalInplaceOp: ATen_RefTensorOp<"reciprocal.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_RoundOp: ATen_ImmutableTensorOp<"round", [NoSideEffect]> { + let summary = "round(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with each of the elements of :attr:`input` rounded + to the closest integer. + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9920, 0.6077, 0.9734, -1.0362]) + >>> torch.round(a) + tensor([ 1., 1., 1., -1.]) + }]; +} + +def ATen_RoundInplaceOp: ATen_RefTensorOp<"round.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_RsqrtOp: ATen_ImmutableTensorOp<"rsqrt", [NoSideEffect]> { + let summary = "rsqrt(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the reciprocal of the square-root of each of + the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}} + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.0370, 0.2970, 1.5420, -0.9105]) + >>> torch.rsqrt(a) + tensor([ nan, 1.8351, 0.8053, nan]) + }]; +} + +def ATen_RsqrtInplaceOp: ATen_RefTensorOp<"rsqrt.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_SigmoidOp: ATen_ImmutableTensorOp<"sigmoid", [NoSideEffect]> { + let summary = "sigmoid(input, *, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the sigmoid of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9213, 1.0887, -0.8858, -1.7683]) + >>> torch.sigmoid(a) + tensor([ 0.7153, 0.7481, 0.2920, 0.1458]) + }]; +} + +def ATen_SigmoidInplaceOp: ATen_RefTensorOp<"sigmoid.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_SignOp: ATen_ImmutableTensorOp<"sign", [NoSideEffect]> { + let summary = "sign(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the signs of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \operatorname{sgn}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> a + tensor([ 0.7000, -1.2000, 0.0000, 2.3000]) + >>> torch.sign(a) + tensor([ 1., -1., 0., 1.]) + }]; +} + +def ATen_SignInplaceOp: ATen_RefTensorOp<"sign.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_SinOp: ATen_ImmutableTensorOp<"sin", [NoSideEffect]> { + let summary = "sin(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the sine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sin(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5461, 0.1347, -2.7266, -0.2746]) + >>> torch.sin(a) + tensor([-0.5194, 0.1343, -0.4032, -0.2711]) + }]; +} + +def ATen_SinInplaceOp: ATen_RefTensorOp<"sin.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_SinhOp: ATen_ImmutableTensorOp<"sinh", [NoSideEffect]> { + let summary = "sinh(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the hyperbolic sine of the elements of + :attr:`input`. + + .. math:: + \text{out}_{i} = \sinh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.5380, -0.8632, -0.1265, 0.9399]) + >>> torch.sinh(a) + tensor([ 0.5644, -0.9744, -0.1268, 1.0845]) + }]; +} + +def ATen_SinhInplaceOp: ATen_RefTensorOp<"sinh.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_SqrtOp: ATen_ImmutableTensorOp<"sqrt", [NoSideEffect]> { + let summary = "sqrt(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the square-root of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}} + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.sqrt(a) + tensor([ nan, 1.0112, 0.2883, 0.6933]) + }]; +} + +def ATen_SqrtInplaceOp: ATen_RefTensorOp<"sqrt.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_TanOp: ATen_ImmutableTensorOp<"tan", [NoSideEffect]> { + let summary = "tan(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the tangent of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \tan(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.2027, -1.7687, 0.4412, -1.3856]) + >>> torch.tan(a) + tensor([-2.5930, 4.9859, 0.4722, -5.3366]) + }]; +} + +def ATen_TanInplaceOp: ATen_RefTensorOp<"tan.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_TanhOp: ATen_ImmutableTensorOp<"tanh", [NoSideEffect]> { + let summary = "tanh(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the hyperbolic tangent of the elements + of :attr:`input`. + + .. math:: + \text{out}_{i} = \tanh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.8986, -0.7279, 1.1745, 0.2611]) + >>> torch.tanh(a) + tensor([ 0.7156, -0.6218, 0.8257, 0.2553]) + }]; +} + +def ATen_TanhInplaceOp: ATen_RefTensorOp<"tanh.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_TruncOp: ATen_ImmutableTensorOp<"trunc", [NoSideEffect]> { + let summary = "trunc(input, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns a new tensor with the truncated integer values of + the elements of :attr:`input`. + + Args: + input (Tensor): the input tensor. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 3.4742, 0.5466, -0.8008, -0.9079]) + >>> torch.trunc(a) + tensor([ 3., 0., -0., -0.]) + }]; +} + +def ATen_TruncInplaceOp: ATen_RefTensorOp<"trunc.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AddOp: ATen_ImmutableTensorOp<"add", [NoSideEffect]> { + let summary = "add(input, other, out=None)"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyScalar:$alpha + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Adds the scalar :attr:`other` to each element of the input :attr:`input` + and returns a new resulting tensor. + + .. math:: + \text{out} = \text{input} + \text{other} + + If :attr:`input` is of type FloatTensor or DoubleTensor, :attr:`other` must be + a real number, otherwise it should be an integer. + + Args: + input (Tensor): the input tensor. + value (Number): the number to be added to each element of :attr:`input` + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + .. function:: add(input, other, *, alpha=1, out=None) + + Each element of the tensor :attr:`other` is multiplied by the scalar + :attr:`alpha` and added to each element of the tensor :attr:`input`. + The resulting tensor is returned. + + The shapes of :attr:`input` and :attr:`other` must be + :ref:`broadcastable `. + + .. math:: + \text{out} = \text{input} + \text{alpha} \times \text{other} + + If :attr:`other` is of type FloatTensor or DoubleTensor, :attr:`alpha` must be + a real number, otherwise it should be an integer. 
+ + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + alpha (Number): the scalar multiplier for :attr:`other` + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> b = torch.randn(4, 1) + >>> b + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(a, b, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + }]; +} + +def ATen_AddInplaceOp: ATen_RefTensorOp<"add.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyScalar:$alpha, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_Atan2Op: ATen_ImmutableTensorOp<"atan2", [NoSideEffect]> { + let summary = "atan2(input, other, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}` + with consideration of the quadrant. Returns a new tensor with the signed angles + in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})` + and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second + parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first + parameter, is the y-coordinate.) + + The shapes of ``input`` and ``other`` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9041, 0.0196, -0.3108, -2.4423]) + >>> torch.atan2(a, torch.randn(4)) + tensor([ 0.9833, 0.0811, -1.9743, -1.4151]) + }]; +} + +def ATen_Atan2InplaceOp: ATen_RefTensorOp<"atan2.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_DivOp: ATen_ImmutableTensorOp<"div", [NoSideEffect]> { + let summary = "div(input, other, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Divides each element of the input ``input`` with the scalar ``other`` and + returns a new resulting tensor. + + .. warning:: + Integer division using div is no longer supported, and in a future release + div will perform true division as in Python 3. Use :func:`torch.true_divide` + or :func:`torch.floor_divide` (// in Python), instead. + + .. math:: + \text{out}_i = \frac{\text{input}_i}{\text{other}} + + If the :class:`torch.dtype` of ``input`` and ``other`` differ, the + :class:`torch.dtype` of the result tensor is determined following rules + described in the type promotion :ref:`documentation `. If + ``out`` is specified, the result must be :ref:`castable ` + to the :class:`torch.dtype` of the specified output tensor. Integral division + by zero leads to undefined behavior. + + Args: + input (Tensor): the input tensor. + other (Number): the number to be divided to each element of ``input`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) + >>> torch.div(a, 0.5) + tensor([ 0.7620, 2.5548, -0.5944, -0.7439, 0.9275]) + + .. 
function:: div(input, other, out=None) -> Tensor + + Each element of the tensor ``input`` is divided by each element of the tensor + ``other``. The resulting tensor is returned. + + .. math:: + \text{out}_i = \frac{\text{input}_i}{\text{other}_i} + + The shapes of ``input`` and ``other`` must be :ref:`broadcastable + `. If the :class:`torch.dtype` of ``input`` and + ``other`` differ, the :class:`torch.dtype` of the result tensor is determined + following rules described in the type promotion :ref:`documentation + `. If ``out`` is specified, the result must be + :ref:`castable ` to the :class:`torch.dtype` of the + specified output tensor. Integral division by zero leads to undefined behavior. + + Args: + input (Tensor): the numerator tensor + other (Tensor): the denominator tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3711, -1.9353, -0.4605, -0.2917], + [ 0.1815, -1.0111, 0.9805, -1.5923], + [ 0.1062, 1.4581, 0.7759, -1.2344], + [-0.1830, -0.0313, 1.1908, -1.4757]]) + >>> b = torch.randn(4) + >>> b + tensor([ 0.8032, 0.2930, -0.8113, -0.2308]) + >>> torch.div(a, b) + tensor([[-0.4620, -6.6051, 0.5676, 1.2637], + [ 0.2260, -3.4507, -1.2086, 6.8988], + [ 0.1322, 4.9764, -0.9564, 5.3480], + [-0.2278, -0.1068, -1.4678, 6.3936]]) + }]; +} + +def ATen_DivInplaceOp: ATen_RefTensorOp<"div.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_FloorDivideOp: ATen_ImmutableTensorOp<"floor_divide", [NoSideEffect]> { + let summary = "floor_divide(input, other, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Return the division of the inputs rounded down to the nearest integer. 
See :func:`torch.div` + for type promotion and broadcasting rules. + + .. math:: + \text{{out}}_i = \left\lfloor \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right\rfloor + + + Args: + input (Tensor): the numerator tensor + other (Tensor or Scalar): the denominator + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([4.0, 3.0]) + >>> b = torch.tensor([2.0, 2.0]) + >>> torch.floor_divide(a, b) + tensor([2.0, 1.0]) + >>> torch.floor_divide(a, 1.4) + tensor([2.0, 2.0]) + }]; +} + +def ATen_FloorDivideInplaceOp: ATen_RefTensorOp<"floor_divide.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_MulOp: ATen_ImmutableTensorOp<"mul", [NoSideEffect]> { + let summary = "mul(input, other, out=None)"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Multiplies each element of the input :attr:`input` with the scalar + :attr:`other` and returns a new resulting tensor. + + .. math:: + \text{out}_i = \text{other} \times \text{input}_i + + If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`other` + should be a real number, otherwise it should be an integer + + Args: + {input} + value (Number): the number to be multiplied to each element of :attr:`input` + {out} + + Example:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.2015, -0.4255, 2.6087]) + >>> torch.mul(a, 100) + tensor([ 20.1494, -42.5491, 260.8663]) + + .. function:: mul(input, other, out=None) + + Each element of the tensor :attr:`input` is multiplied by the corresponding + element of the Tensor :attr:`other`. The resulting tensor is returned. + + The shapes of :attr:`input` and :attr:`other` must be + :ref:`broadcastable `. + + .. 
math:: + \text{out}_i = \text{input}_i \times \text{other}_i + + Args: + input (Tensor): the first multiplicand tensor + other (Tensor): the second multiplicand tensor + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 1) + >>> a + tensor([[ 1.1207], + [-0.3137], + [ 0.0700], + [ 0.8378]]) + >>> b = torch.randn(1, 4) + >>> b + tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]]) + >>> torch.mul(a, b) + tensor([[ 0.5767, 0.1363, -0.5877, 2.5083], + [-0.1614, -0.0382, 0.1645, -0.7021], + [ 0.0360, 0.0085, -0.0367, 0.1567], + [ 0.4312, 0.1019, -0.4394, 1.8753]]) + }]; +} + +def ATen_MulInplaceOp: ATen_RefTensorOp<"mul.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_RemainderOp: ATen_ImmutableTensorOp<"remainder", [NoSideEffect]> { + let summary = "remainder(input, other, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the element-wise remainder of division. + + The dividend and divisor may contain both for integer and floating point + numbers. The remainder has the same sign as the divisor :attr:`other`. + + When :attr:`other` is a tensor, the shapes of :attr:`input` and + :attr:`other` must be :ref:`broadcastable `. + + Args: + input (Tensor): the dividend + other (Tensor or float): the divisor that may be either a number or a + Tensor of the same shape as the dividend + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1., 2, 3, 4, 5]), 1.5) + tensor([ 1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + + .. 
seealso:: + + :func:`torch.fmod`, which computes the element-wise remainder of + division equivalently to the C library function ``fmod()``. + }]; +} + +def ATen_RemainderInplaceOp: ATen_RefTensorOp<"remainder.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_TrueDivideOp: ATen_ImmutableTensorOp<"true_divide", [NoSideEffect]> { + let summary = "true_divide(dividend, divisor, *, out) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$dividend, + ATen_AnyTensor:$divisor + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Performs "true division" that always computes the division + in floating point. Analogous to division in Python 3 and equivalent to + :func:`torch.div` except when both inputs have bool or integer scalar types, + in which case they are cast to the default (floating) scalar type before the division. + + .. math:: + \text{out}_i = \frac{\text{dividend}_i}{\text{divisor}} + + Args: + dividend (Tensor): the dividend + divisor (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> dividend = torch.tensor([5, 3], dtype=torch.int) + >>> divisor = torch.tensor([3, 2], dtype=torch.int) + >>> torch.true_divide(dividend, divisor) + tensor([1.6667, 1.5000]) + >>> torch.true_divide(dividend, 2) + tensor([2.5000, 1.5000]) + }]; +} + +def ATen_TrueDivideInplaceOp: ATen_RefTensorOp<"true_divide.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$dividend, + ATen_AnyTensor:$divisor, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_CumsumOp: ATen_ImmutableTensorOp<"cumsum", [NoSideEffect]> { + let summary = "cumsum(input, dim, out=None, dtype=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyScalar:$dim + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Returns the cumulative sum of elements of :attr:`input` in the dimension + :attr:`dim`. + + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595, + 0.1850, -1.1571, -0.4243]) + >>> torch.cumsum(a, dim=0) + tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058, + -1.8209, -2.9780, -3.4022]) + }]; +} + +def ATen_CumsumInplaceOp: ATen_RefTensorOp<"cumsum.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyScalar:$dim, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AddmmOp: ATen_ImmutableTensorOp<"addmm", [NoSideEffect]> { + let summary = "addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$mat1, + ATen_AnyTensor:$mat2, + ATen_AnyScalar:$beta, + ATen_AnyScalar:$alpha + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. 
+ + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be multiplied + mat2 (Tensor): the second matrix to be multiplied + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + }]; +} + +def ATen_AddmmInplaceOp: ATen_RefTensorOp<"addmm.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$mat1, + ATen_AnyTensor:$mat2, + ATen_AnyScalar:$beta, + ATen_AnyScalar:$alpha, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_DotOp: ATen_ImmutableTensorOp<"dot", [NoSideEffect]> { + let summary = "dot(input, tensor) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$tensor + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Computes the dot product (inner product) of two tensors. + + .. note:: This function does not :ref:`broadcast `. + + Example:: + + >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + }]; +} + +def ATen_MatmulOp: ATen_ImmutableTensorOp<"matmul", [NoSideEffect]> { + let summary = "matmul(input, other, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Matrix product of two tensors. + + The behavior depends on the dimensionality of the tensors as follows: + + - If both tensors are 1-dimensional, the dot product (scalar) is returned. + - If both arguments are 2-dimensional, the matrix-matrix product is returned. 
+ - If the first argument is 1-dimensional and the second argument is 2-dimensional, + a 1 is prepended to its dimension for the purpose of the matrix multiply. + After the matrix multiply, the prepended dimension is removed. + - If the first argument is 2-dimensional and the second argument is 1-dimensional, + the matrix-vector product is returned. + - If both arguments are at least 1-dimensional and at least one argument is + N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first + argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the + batched matrix multiply and removed after. If the second argument is 1-dimensional, a + 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. + The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus + must be broadcastable). For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)` + tensor, :attr:`out` will be an :math:`(j \times k \times n \times p)` tensor. + + .. note:: + + The 1-dimensional dot product version of this function does not support an :attr:`out` parameter. + + Arguments: + input (Tensor): the first tensor to be multiplied + other (Tensor): the second tensor to be multiplied + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> # vector x vector + >>> tensor1 = torch.randn(3) + >>> tensor2 = torch.randn(3) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([]) + >>> # matrix x vector + >>> tensor1 = torch.randn(3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([3]) + >>> # batched matrix x broadcasted vector + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3]) + >>> # batched matrix x batched matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(10, 4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + >>> # batched matrix x broadcasted matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + }]; +} + +def ATen_MatmulInplaceOp: ATen_RefTensorOp<"matmul.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$other, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_MmOp: ATen_ImmutableTensorOp<"mm", [NoSideEffect]> { + let summary = "mm(input, mat2, out=None) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$mat2 + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`. + + If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor. + + .. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. + + Args: + input (Tensor): the first matrix to be multiplied + mat2 (Tensor): the second matrix to be multiplied + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.mm(mat1, mat2) + tensor([[ 0.4851, 0.5037, -0.3633], + [-0.0760, -3.6705, 2.4784]]) + }]; +} + +def ATen_MmInplaceOp: ATen_RefTensorOp<"mm.inplace", []> { + let summary = "See non-inplace op variant."; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyTensor:$mat2, + ATen_AnyRefTensor:$out + ); + let results = (outs); +} + +def ATen_AvgPool1dOp: ATen_ImmutableTensorOp<"avg_pool1d", [NoSideEffect]> { + let summary = "avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor"; + let arguments = (ins + ATen_AnyTensor:$input, + ATen_AnyScalar:$kernel_size, + ATen_AnyScalar:$stride, + ATen_AnyScalar:$padding, + ATen_AnyScalar:$ceil_mode, + ATen_AnyScalar:$count_include_pad + ); + let results = (outs + ATen_AnyTensor:$result + ); + let description = [{ + Applies a 1D average pooling over an input signal composed of several + input planes. + + See :class:`~torch.nn.AvgPool1d` for details and output shape. + + Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` + kernel_size: the size of the window. Can be a single number or a + tuple `(kW,)` + stride: the stride of the window. Can be a single number or a tuple + `(sW,)`. Default: :attr:`kernel_size` + padding: implicit zero paddings on both sides of the input. Can be a + single number or a tuple `(padW,)`. Default: 0 + ceil_mode: when True, will use `ceil` instead of `floor` to compute the + output shape. Default: ``False`` + count_include_pad: when True, will include the zero-padding in the + averaging calculation. 
Default: ``True``
+
+    Examples::
+
+        >>> # pool of square window of size=3, stride=2
+        >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
+        >>> F.avg_pool1d(input, kernel_size=3, stride=2)
+        tensor([[[ 2., 4., 6.]]])
+  }];
+}
+
+def ATen_MaxPool1dOp: ATen_ImmutableTensorOp<"max_pool1d", [NoSideEffect]> {
+  let summary = "Applies a 1D max pooling over an input signal composed of several input";
+  let arguments = (ins
+    ATen_AnyTensor:$input,
+    ATen_AnyScalar:$kernel_size,
+    ATen_AnyScalar:$stride,
+    ATen_AnyScalar:$padding,
+    ATen_AnyScalar:$dilation,
+    ATen_AnyScalar:$ceil_mode
+  );
+  let results = (outs
+    ATen_AnyTensor:$result
+  );
+  let description = [{
+
+    See :class:`~torch.nn.MaxPool1d` for details.
+
+  }];
+}
+
diff --git a/python/npcomp/torch/opdefs/generate_ods.py b/python/npcomp/torch/opdefs/generate_ods.py
new file mode 100644
index 000000000..39ed0ffcc
--- /dev/null
+++ b/python/npcomp/torch/opdefs/generate_ods.py
@@ -0,0 +1,199 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""Generates ODS for a registry of ops."""
+
+from typing import TextIO
+
+import argparse
+from contextlib import contextmanager
+import importlib
+import logging
+import re
+import sys
+import textwrap
+
+from .registry import *
+
+_INDENT = " "
+
+
+class OdsEmitter:
+  ods_prefix = "ATen_"
+  ods_suffix = "Op"
+  ods_value_template = "ATen_ImmutableTensorOp"
+  ods_ref_template = "ATen_RefTensorOp"
+  op_prefix = ""
+
+  def __init__(self, r: OpRegistry, out: TextIO):
+    super().__init__()
+    self.r = r
+    self.out = out
+    self.indent_level = 0
+
+  def emit_ods(self):
+    for op_m in self.r.mappings:
+      if isinstance(op_m, SimpleOpMapping):
+        self._emit_simple_op_mapping(op_m)
+      else:
+        logging.warning(f"Unrecognized op mapping type: {op_m!r}")
+
+  def _emit_simple_op_mapping(self, op_m: SimpleOpMapping):
+    identifier = (f"{self.ods_prefix}"
+                  f"{_snakecase_to_camelcase(op_m.mlir_operation_name)}"
+                  f"{self.ods_suffix}")
+    traits = []
+
+    if op_m.is_outref_form:
+      template_name = self.ods_ref_template
+      summary = "See non-inplace op variant."
+      description = ""
+    else:
+      template_name = self.ods_value_template
+      summary, description = _split_docstring(op_m.op_f.__doc__)
+
+    if not op_m.is_outref_form:
+      traits.append("NoSideEffect")
+    self.print(f"def {identifier}: {template_name}"
+               f"<{_quote(op_m.mlir_operation_name)}, ["
+               f"{', '.join(traits)}"
+               f"]> {{")
+
+    # Summary.
+    with self.indent():
+      self.print(f"let summary = {_quote(summary)};")
+
+    # Arguments.
+    with self.indent():
+      self.print("let arguments = (ins")
+      with self.indent():
+        operand_len = len(op_m.operand_map)
+        for index, (_, value_spec) in enumerate(op_m.operand_map):
+          is_last = index == operand_len - 1
+          self.print(f"{value_spec.mlir_ods_predicate}:${value_spec.name}",
+                     end="\n" if is_last else ",\n")
+      self.print(");")
+
+    # Results (omitted if an outref/inplace form).
+
+    with self.indent():
+      if op_m.is_outref_form:
+        self.print("let results = (outs);")
+      else:
+        self.print("let results = (outs")
+        with self.indent():
+          result_len = len(op_m.result_map)
+          for index, (_, value_spec) in enumerate(op_m.result_map):
+            is_last = index == result_len - 1
+            self.print(f"{value_spec.mlir_ods_predicate}:${value_spec.name}",
+                       end="\n" if is_last else ",\n")
+        self.print(");")
+
+    # Description and extra class declarations.
+    with self.indent():
+      if description:
+        quoted_description = _quote_multiline_docstring(
+            description, indent_level=self.indent_level)
+        self.print(f"let description = {quoted_description};")
+
+    self.print("}\n")
+
+  @contextmanager
+  def indent(self, level=1):
+    self.indent_level += level
+    yield
+    self.indent_level -= level
+    assert self.indent_level >= 0, "Unbalanced indentation"
+
+  def print(self, s, *, end="\n", indent=True):
+    if indent and self.indent_level:
+      self.out.write(_INDENT * self.indent_level)
+    self.out.write(s)
+    self.out.write(end)
+
+
+def _snakecase_to_camelcase(ident: str):
+  return "".join(x.capitalize() or "_" for x in re.split(r"[\._]", ident))
+
+
+def _quote(s: str):
+  s = s.replace('"', '\\"')
+  return f'"{s}"'
+
+
+def _quote_multiline_docstring(s: str, indent_level: int = 0):
+  # TODO: Possibly find a python module to markdown the docstring for better
+  # document generation.
+  # Unlikely to contain the delimiter and since just a docstring, be safe.
+  s = s.replace("}]", "")
+  # Strip each line.
+  s = "\n".join([l.rstrip() for l in s.splitlines()])
+  indent = _INDENT * indent_level
+  s = textwrap.indent(s, indent + _INDENT)
+  return "[{\n" + s + "\n" + indent + "}]"
+
+
+def _split_docstring(docstring: str):
+  """Splits the docstring into a summary and description."""
+  lines = docstring.splitlines()
+  # Skip leading blank lines.
+
+  while lines and not lines[0]:
+    lines = lines[1:]
+  if len(lines) > 2:
+    return lines[0], "\n".join(lines[2:])
+  else:
+    return (lines[0] if lines else ""), ""
+
+
+def main(args):
+  r = OpRegistry()
+  # Populate from modules that provide a populate() function.
+  op_modules = [args.op_module]
+  for m_name in op_modules:
+    logging.info(f"Populating from module: {m_name}")
+    m = importlib.import_module(m_name, package=__package__)
+    f = getattr(m, "populate")
+    f(r)
+
+  out = sys.stdout
+
+  # Write file header.
+  module_name = sys.modules["__main__"].__loader__.name
+  banner_lines = [
+      "//===-------------------------------------------------------*- tablegen -*-===//",
+      "//",
+      "// This file is licensed under the Apache License v2.0 with LLVM Exceptions.",
+      "// See https://llvm.org/LICENSE.txt for license information.",
+      "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception",
+      "//",
+      "// Operation summaries and descriptions were systematically derived from public",
+      "// API docstrings and are licensed accordingly:",
+      "// https://github.com/pytorch/pytorch/blob/master/LICENSE",
+      "//===----------------------------------------------------------------------===//",
+      "// This file is automatically generated. 
Please do not edit.", + "// Generated via:", + f"// python -m {module_name} {' '.join(sys.argv[1:])}", + "//===----------------------------------------------------------------------===//", + "", + "", + ] + banner_lines = [l.strip() for l in banner_lines] + out.write("\n".join(banner_lines)) + + emitter = OdsEmitter(r, out=out) + emitter.emit_ods() + + +def _create_argparse(): + parser = argparse.ArgumentParser(prog="generate_ods") + parser.add_argument( + "--op_module", + default=".aten_ops", + help="Name of a python module for populating the registry") + return parser + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + parser = _create_argparse() + args = parser.parse_args() + main(args) diff --git a/python/npcomp/torch/opdefs/registry.py b/python/npcomp/torch/opdefs/registry.py index 452b0a4aa..4beb23e5e 100644 --- a/python/npcomp/torch/opdefs/registry.py +++ b/python/npcomp/torch/opdefs/registry.py @@ -22,7 +22,7 @@ Example usage (fully automatic discovery): alpha=ScalarValue()).with_outref_variant() """ -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Sequence, Tuple import logging import random @@ -92,6 +92,10 @@ class ValueSpec: super().__init__() self.name = name + @property + def mlir_ods_predicate(self): + return "AnyType" + def generate_example(self, index=0): """Generates an example value.""" raise NotImplementedError() @@ -109,6 +113,10 @@ class TensorValue(ValueSpec): example_size = (2, 3, 7) # No significance. self.example_size = example_size + @property + def mlir_ods_predicate(self): + return "ATen_AnyTensor" + def generate_example(self, index=0): return torch.rand(*self.example_size) @@ -122,6 +130,10 @@ class TensorOutRef(ValueSpec): example_size = (2, 3, 7) # No significance. 
self.example_size = example_size + @property + def mlir_ods_predicate(self): + return "ATen_AnyRefTensor" + def generate_example(self, index=0): return torch.rand(*self.example_size) @@ -133,13 +145,22 @@ class ScalarValue(ValueSpec): super().__init__(name=name) self.value = value + @property + def mlir_ods_predicate(self): + return "ATen_AnyScalar" + def generate_example(self, index=0): if self.value is not None: return self.value return 1.0 + index # Generates a stable value. -class SimpleOpMapping: +class OpMapping: + """Base class for things purporting to map an operation.""" + pass + + +class SimpleOpMapping(OpMapping): """Maps a PyTorch invocation to its MLIR representation.""" def __init__(self, op_f, *op_args, **op_kwargs): @@ -228,10 +249,17 @@ class SimpleOpMapping: def _set_default_mlir_operation_name(self): op_ns, op_name = self.op_kind.split("::", maxsplit=1) - default_name = op_ns + "." + op_name + # Since these are emitted into the "aten" dialect namespace, alias them + # to omit the prefix to distinguish from custom ops and others (which will + # have a prefix). + default_name = op_name if op_ns == "aten" else op_ns + "." + op_name + if op_ns == "aten": + default_name = op_name + else: + default_name = op_ns + "." + op_name if self.is_outref_form: - default_name += "_outref" + default_name += ".inplace" self.mlir_operation_name = default_name def _configure_from_example(self): @@ -352,8 +380,12 @@ class OpRegistry: return m @property - def mappings(self): - """Returns the list of SimpleOpMappings.""" + def mappings(self) -> Sequence[OpMapping]: + """Returns the list of OpMapping. + + Returns: + Sequence of OpMapping concrete classes (most commonly SimpleOpMapping). + """ self._finalize_pending() return self._mappings