// mirror of https://github.com/llvm/torch-mlir (2502 lines, 71 KiB, TableGen)
//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
//   https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
// This file is automatically generated. Please do not edit.
// Generated via:
//   python -m npcomp.torch.opdefs.generate_ods
//===----------------------------------------------------------------------===//

// Elementwise unary ops: abs, acos, angle, asin, atan.
// Each value-semantic op (ATen_ImmutableTensorOp) has a matching
// "<name>.inplace" variant (ATen_RefTensorOp) that writes into $out.
def ATen_AbsOp: ATen_ImmutableTensorOp<"abs", [NoSideEffect]> {
  let summary = "abs(input, *, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the absolute value of each element in :attr:`input`.

    .. math::
        \text{out}_{i} = |\text{input}_{i}|

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.abs(torch.tensor([-1, -2, 3]))
        tensor([ 1, 2, 3])
  }];
}

def ATen_AbsInplaceOp: ATen_RefTensorOp<"abs.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_AcosOp: ATen_ImmutableTensorOp<"acos", [NoSideEffect]> {
  let summary = "acos(input, *, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the inverse cosine of each element in :attr:`input`.

    .. math::
        \text{out}_{i} = \cos^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
        >>> torch.acos(a)
        tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
  }];
}

def ATen_AcosInplaceOp: ATen_RefTensorOp<"acos.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_AngleOp: ATen_ImmutableTensorOp<"angle", [NoSideEffect]> {
  let summary = "angle(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the element-wise angle (in radians) of the given :attr:`input` tensor.

    .. math::
        \text{out}_{i} = angle(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
        tensor([ 135., 135, -45])
  }];
}

def ATen_AngleInplaceOp: ATen_RefTensorOp<"angle.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_AsinOp: ATen_ImmutableTensorOp<"asin", [NoSideEffect]> {
  let summary = "asin(input, *, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the arcsine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \sin^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.5962, 1.4985, -0.4396, 1.4525])
        >>> torch.asin(a)
        tensor([-0.6387, nan, -0.4552, nan])
  }];
}

def ATen_AsinInplaceOp: ATen_RefTensorOp<"asin.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_AtanOp: ATen_ImmutableTensorOp<"atan", [NoSideEffect]> {
  let summary = "atan(input, *, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the arctangent of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \tan^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
        >>> torch.atan(a)
        tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
  }];
}

def ATen_AtanInplaceOp: ATen_RefTensorOp<"atan.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary ops: ceil, conj, cos, cosh (plus pseudo-inplace variants).
def ATen_CeilOp: ATen_ImmutableTensorOp<"ceil", [NoSideEffect]> {
  let summary = "ceil(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the ceil of the elements of :attr:`input`,
    the smallest integer greater than or equal to each element.

    .. math::
        \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil = \left\lfloor \text{input}_{i} \right\rfloor + 1

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.6341, -1.4208, -1.0900, 0.5826])
        >>> torch.ceil(a)
        tensor([-0., -1., -1., 1.])
  }];
}

def ATen_CeilInplaceOp: ATen_RefTensorOp<"ceil.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_ConjOp: ATen_ImmutableTensorOp<"conj", [NoSideEffect]> {
  let summary = "conj(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the element-wise conjugate of the given :attr:`input` tensor.

    .. math::
        \text{out}_{i} = conj(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.conj(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
        tensor([-1 - 1j, -2 - 2j, 3 + 3j])
  }];
}

def ATen_ConjInplaceOp: ATen_RefTensorOp<"conj.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_CosOp: ATen_ImmutableTensorOp<"cos", [NoSideEffect]> {
  let summary = "cos(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the cosine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \cos(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
        >>> torch.cos(a)
        tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
  }];
}

def ATen_CosInplaceOp: ATen_RefTensorOp<"cos.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_CoshOp: ATen_ImmutableTensorOp<"cosh", [NoSideEffect]> {
  let summary = "cosh(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the hyperbolic cosine of the elements of
    :attr:`input`.

    .. math::
        \text{out}_{i} = \cosh(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
        >>> torch.cosh(a)
        tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
  }];
}

def ATen_CoshInplaceOp: ATen_RefTensorOp<"cosh.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary special functions: digamma, erf, erfc, erfinv.
def ATen_DigammaOp: ATen_ImmutableTensorOp<"digamma", [NoSideEffect]> {
  let summary = "digamma(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the logarithmic derivative of the gamma function on `input`.

    .. math::
        \psi(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}

    Args:
        input (Tensor): the tensor to compute the digamma function on

    Example::

        >>> a = torch.tensor([1, 0.5])
        >>> torch.digamma(a)
        tensor([-0.5772, -1.9635])
  }];
}

def ATen_DigammaInplaceOp: ATen_RefTensorOp<"digamma.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_ErfOp: ATen_ImmutableTensorOp<"erf", [NoSideEffect]> {
  let summary = "erf(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the error function of each element. The error function is defined as follows:

    .. math::
        \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.erf(torch.tensor([0, -1., 10.]))
        tensor([ 0.0000, -0.8427, 1.0000])
  }];
}

def ATen_ErfInplaceOp: ATen_RefTensorOp<"erf.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_ErfcOp: ATen_ImmutableTensorOp<"erfc", [NoSideEffect]> {
  let summary = "erfc(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the complementary error function of each element of :attr:`input`.
    The complementary error function is defined as follows:

    .. math::
        \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.erfc(torch.tensor([0, -1., 10.]))
        tensor([ 1.0000, 1.8427, 0.0000])
  }];
}

def ATen_ErfcInplaceOp: ATen_RefTensorOp<"erfc.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_ErfinvOp: ATen_ImmutableTensorOp<"erfinv", [NoSideEffect]> {
  let summary = "erfinv(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the inverse error function of each element of :attr:`input`.
    The inverse error function is defined in the range :math:`(-1, 1)` as:

    .. math::
        \mathrm{erfinv}(\mathrm{erf}(x)) = x

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.erfinv(torch.tensor([0, 0.5, -1.]))
        tensor([ 0.0000, 0.4769, -inf])
  }];
}

def ATen_ErfinvInplaceOp: ATen_RefTensorOp<"erfinv.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary ops: exp, expm1, floor (plus pseudo-inplace variants).
def ATen_ExpOp: ATen_ImmutableTensorOp<"exp", [NoSideEffect]> {
  let summary = "exp(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the exponential of the elements
    of the input tensor :attr:`input`.

    .. math::
        y_{i} = e^{x_{i}}

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.exp(torch.tensor([0, math.log(2.)]))
        tensor([ 1., 2.])
  }];
}

def ATen_ExpInplaceOp: ATen_RefTensorOp<"exp.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_Expm1Op: ATen_ImmutableTensorOp<"expm1", [NoSideEffect]> {
  let summary = "expm1(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the exponential of the elements minus 1
    of :attr:`input`.

    .. math::
        y_{i} = e^{x_{i}} - 1

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.expm1(torch.tensor([0, math.log(2.)]))
        tensor([ 0., 1.])
  }];
}

def ATen_Expm1InplaceOp: ATen_RefTensorOp<"expm1.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_FloorOp: ATen_ImmutableTensorOp<"floor", [NoSideEffect]> {
  let summary = "floor(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the floor of the elements of :attr:`input`,
    the largest integer less than or equal to each element.

    .. math::
        \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.8166, 1.5308, -0.2530, -0.2091])
        >>> torch.floor(a)
        tensor([-1., 1., -1., -1.])
  }];
}

def ATen_FloorInplaceOp: ATen_RefTensorOp<"floor.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary ops: frac, lgamma, log (plus pseudo-inplace variants).
def ATen_FracOp: ATen_ImmutableTensorOp<"frac", [NoSideEffect]> {
  let summary = "frac(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the fractional portion of each element in :attr:`input`.

    .. math::
        \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})

    Example::

        >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
        tensor([ 0.0000, 0.5000, -0.2000])
  }];
}

def ATen_FracInplaceOp: ATen_RefTensorOp<"frac.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_LgammaOp: ATen_ImmutableTensorOp<"lgamma", [NoSideEffect]> {
  let summary = "lgamma(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the logarithm of the gamma function on :attr:`input`.

    .. math::
        \text{out}_{i} = \log \Gamma(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.arange(0.5, 2, 0.5)
        >>> torch.lgamma(a)
        tensor([ 0.5724, 0.0000, -0.1208])
  }];
}

def ATen_LgammaInplaceOp: ATen_RefTensorOp<"lgamma.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_LogOp: ATen_ImmutableTensorOp<"log", [NoSideEffect]> {
  let summary = "log(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the natural logarithm of the elements
    of :attr:`input`.

    .. math::
        y_{i} = \log_{e} (x_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])
        >>> torch.log(a)
        tensor([ nan, nan, nan, nan, nan])
  }];
}

def ATen_LogInplaceOp: ATen_RefTensorOp<"log.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary ops: log10, log1p, log2 (plus pseudo-inplace variants).
def ATen_Log10Op: ATen_ImmutableTensorOp<"log10", [NoSideEffect]> {
  let summary = "log10(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the logarithm to the base 10 of the elements
    of :attr:`input`.

    .. math::
        y_{i} = \log_{10} (x_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.rand(5)
        >>> a
        tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])

        >>> torch.log10(a)
        tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
  }];
}

def ATen_Log10InplaceOp: ATen_RefTensorOp<"log10.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_Log1pOp: ATen_ImmutableTensorOp<"log1p", [NoSideEffect]> {
  let summary = "log1p(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the natural logarithm of (1 + :attr:`input`).

    .. math::
        y_i = \log_{e} (x_i + 1)

    .. note:: This function is more accurate than :func:`torch.log` for small
              values of :attr:`input`

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
        >>> torch.log1p(a)
        tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
  }];
}

def ATen_Log1pInplaceOp: ATen_RefTensorOp<"log1p.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_Log2Op: ATen_ImmutableTensorOp<"log2", [NoSideEffect]> {
  let summary = "log2(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the logarithm to the base 2 of the elements
    of :attr:`input`.

    .. math::
        y_{i} = \log_{2} (x_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.rand(5)
        >>> a
        tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])

        >>> torch.log2(a)
        tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
  }];
}

def ATen_Log2InplaceOp: ATen_RefTensorOp<"log2.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary ops: neg, reciprocal, round, rsqrt (plus pseudo-inplace variants).
def ATen_NegOp: ATen_ImmutableTensorOp<"neg", [NoSideEffect]> {
  let summary = "neg(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the negative of the elements of :attr:`input`.

    .. math::
        \text{out} = -1 \times \text{input}

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
        >>> torch.neg(a)
        tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
  }];
}

def ATen_NegInplaceOp: ATen_RefTensorOp<"neg.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_ReciprocalOp: ATen_ImmutableTensorOp<"reciprocal", [NoSideEffect]> {
  let summary = "reciprocal(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the reciprocal of the elements of :attr:`input`

    .. math::
        \text{out}_{i} = \frac{1}{\text{input}_{i}}

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.4595, -2.1219, -1.4314, 0.7298])
        >>> torch.reciprocal(a)
        tensor([-2.1763, -0.4713, -0.6986, 1.3702])
  }];
}

def ATen_ReciprocalInplaceOp: ATen_RefTensorOp<"reciprocal.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_RoundOp: ATen_ImmutableTensorOp<"round", [NoSideEffect]> {
  let summary = "round(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with each of the elements of :attr:`input` rounded
    to the closest integer.

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.9920, 0.6077, 0.9734, -1.0362])
        >>> torch.round(a)
        tensor([ 1., 1., 1., -1.])
  }];
}

def ATen_RoundInplaceOp: ATen_RefTensorOp<"round.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_RsqrtOp: ATen_ImmutableTensorOp<"rsqrt", [NoSideEffect]> {
  let summary = "rsqrt(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the reciprocal of the square-root of each of
    the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.0370, 0.2970, 1.5420, -0.9105])
        >>> torch.rsqrt(a)
        tensor([ nan, 1.8351, 0.8053, nan])
  }];
}

def ATen_RsqrtInplaceOp: ATen_RefTensorOp<"rsqrt.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
// Elementwise unary ops: sigmoid, sign, sin, sinh (plus pseudo-inplace variants).
def ATen_SigmoidOp: ATen_ImmutableTensorOp<"sigmoid", [NoSideEffect]> {
  let summary = "sigmoid(input, *, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the sigmoid of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
        >>> torch.sigmoid(a)
        tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
  }];
}

def ATen_SigmoidInplaceOp: ATen_RefTensorOp<"sigmoid.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_SignOp: ATen_ImmutableTensorOp<"sign", [NoSideEffect]> {
  let summary = "sign(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the signs of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
        >>> a
        tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
        >>> torch.sign(a)
        tensor([ 1., -1., 0., 1.])
  }];
}

def ATen_SignInplaceOp: ATen_RefTensorOp<"sign.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_SinOp: ATen_ImmutableTensorOp<"sin", [NoSideEffect]> {
  let summary = "sin(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the sine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \sin(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.5461, 0.1347, -2.7266, -0.2746])
        >>> torch.sin(a)
        tensor([-0.5194, 0.1343, -0.4032, -0.2711])
  }];
}

def ATen_SinInplaceOp: ATen_RefTensorOp<"sin.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}

def ATen_SinhOp: ATen_ImmutableTensorOp<"sinh", [NoSideEffect]> {
  let summary = "sinh(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the hyperbolic sine of the elements of
    :attr:`input`.

    .. math::
        \text{out}_{i} = \sinh(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
        >>> torch.sinh(a)
        tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
  }];
}

def ATen_SinhInplaceOp: ATen_RefTensorOp<"sinh.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
def ATen_SqrtOp: ATen_ImmutableTensorOp<"sqrt", [NoSideEffect]> {
|
|
let summary = "sqrt(input, out=None) -> Tensor";
|
|
let arguments = (ins
|
|
ATen_AnyTensor:$input
|
|
);
|
|
let results = (outs
|
|
ATen_AnyTensor:$result
|
|
);
|
|
let description = [{
|
|
Returns a new tensor with the square-root of the elements of :attr:`input`.
|
|
|
|
.. math::
|
|
\text{out}_{i} = \sqrt{\text{input}_{i}}
|
|
|
|
Args:
|
|
input (Tensor): the input tensor.
|
|
out (Tensor, optional): the output tensor.
|
|
|
|
Example::
|
|
|
|
>>> a = torch.randn(4)
|
|
>>> a
|
|
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
|
|
>>> torch.sqrt(a)
|
|
tensor([ nan, 1.0112, 0.2883, 0.6933])
|
|
}];
|
|
}
|
|
|
|
def ATen_SqrtInplaceOp: ATen_RefTensorOp<"sqrt.inplace", []> {
|
|
let summary = "See non-inplace op variant.";
|
|
let arguments = (ins
|
|
ATen_AnyTensor:$input,
|
|
ATen_AnyRefTensor:$out
|
|
);
|
|
let results = (outs);
|
|
}
|
|
|
|
def ATen_TanOp: ATen_ImmutableTensorOp<"tan", [NoSideEffect]> {
|
|
let summary = "tan(input, out=None) -> Tensor";
|
|
let arguments = (ins
|
|
ATen_AnyTensor:$input
|
|
);
|
|
let results = (outs
|
|
ATen_AnyTensor:$result
|
|
);
|
|
let description = [{
|
|
Returns a new tensor with the tangent of the elements of :attr:`input`.
|
|
|
|
.. math::
|
|
\text{out}_{i} = \tan(\text{input}_{i})
|
|
|
|
Args:
|
|
input (Tensor): the input tensor.
|
|
out (Tensor, optional): the output tensor.
|
|
|
|
Example::
|
|
|
|
>>> a = torch.randn(4)
|
|
>>> a
|
|
tensor([-1.2027, -1.7687, 0.4412, -1.3856])
|
|
>>> torch.tan(a)
|
|
tensor([-2.5930, 4.9859, 0.4722, -5.3366])
|
|
}];
|
|
}
|
|
|
|
def ATen_TanInplaceOp: ATen_RefTensorOp<"tan.inplace", []> {
|
|
let summary = "See non-inplace op variant.";
|
|
let arguments = (ins
|
|
ATen_AnyTensor:$input,
|
|
ATen_AnyRefTensor:$out
|
|
);
|
|
let results = (outs);
|
|
}
|
|
|
|
def ATen_TanhOp: ATen_ImmutableTensorOp<"tanh", [NoSideEffect]> {
|
|
let summary = "tanh(input, out=None) -> Tensor";
|
|
let arguments = (ins
|
|
ATen_AnyTensor:$input
|
|
);
|
|
let results = (outs
|
|
ATen_AnyTensor:$result
|
|
);
|
|
let description = [{
|
|
Returns a new tensor with the hyperbolic tangent of the elements
|
|
of :attr:`input`.
|
|
|
|
.. math::
|
|
\text{out}_{i} = \tanh(\text{input}_{i})
|
|
|
|
Args:
|
|
input (Tensor): the input tensor.
|
|
out (Tensor, optional): the output tensor.
|
|
|
|
Example::
|
|
|
|
>>> a = torch.randn(4)
|
|
>>> a
|
|
tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
|
|
>>> torch.tanh(a)
|
|
tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
|
|
}];
|
|
}
|
|
|
|
def ATen_TanhInplaceOp: ATen_RefTensorOp<"tanh.inplace", []> {
|
|
let summary = "See non-inplace op variant.";
|
|
let arguments = (ins
|
|
ATen_AnyTensor:$input,
|
|
ATen_AnyRefTensor:$out
|
|
);
|
|
let results = (outs);
|
|
}
|
|
|
|
// Elementwise truncation toward zero (functional form of `torch.trunc`).
def ATen_TruncOp: ATen_ImmutableTensorOp<"trunc", [NoSideEffect]> {
  let summary = "trunc(input, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the truncated integer values of
    the elements of :attr:`input`.

    Args:
      input (Tensor): the input tensor.
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
        >>> torch.trunc(a)
        tensor([ 3., 0., -0., -0.])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_TruncOp: writes into `out`, no result.
def ATen_TruncInplaceOp: ATen_RefTensorOp<"trunc.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise rectified linear unit. The generator emitted this record with
// an empty summary and no description, unlike every sibling op; supply the
// signature-style summary and a short description in the established format.
def ATen_ReluOp: ATen_ImmutableTensorOp<"relu", [NoSideEffect]> {
  let summary = "relu(input) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Applies the rectified linear unit function element-wise. See
    :class:`~torch.nn.ReLU` for more details.
  }];
}
|
|
|
|
// Elementwise/broadcasting addition with scalar multiplier `alpha`
// (out = input + alpha * other).
// NOTE(review): the summary shows only `add(input, other, out=None)` even
// though an `alpha` operand is declared; the second `.. function::` section
// of the description documents the alpha overload — confirm against the
// generator whether the summary should mention alpha.
def ATen_AddOp: ATen_ImmutableTensorOp<"add", [NoSideEffect]> {
  let summary = "add(input, other, out=None)";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyScalar:$alpha
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Adds the scalar :attr:`other` to each element of the input :attr:`input`
    and returns a new resulting tensor.

    .. math::
        \text{out} = \text{input} + \text{other}

    If :attr:`input` is of type FloatTensor or DoubleTensor, :attr:`other` must be
    a real number, otherwise it should be an integer.

    Args:
      input (Tensor): the input tensor.
      value (Number): the number to be added to each element of :attr:`input`

    Keyword arguments:
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
        >>> torch.add(a, 20)
        tensor([ 20.0202, 21.0985, 21.3506, 19.3944])

    .. function:: add(input, other, *, alpha=1, out=None)

    Each element of the tensor :attr:`other` is multiplied by the scalar
    :attr:`alpha` and added to each element of the tensor :attr:`input`.
    The resulting tensor is returned.

    The shapes of :attr:`input` and :attr:`other` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    .. math::
        \text{out} = \text{input} + \text{alpha} \times \text{other}

    If :attr:`other` is of type FloatTensor or DoubleTensor, :attr:`alpha` must be
    a real number, otherwise it should be an integer.

    Args:
      input (Tensor): the first input tensor
      other (Tensor): the second input tensor
      alpha (Number): the scalar multiplier for :attr:`other`

    Keyword arguments:
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.9732, -0.3497, 0.6245, 0.4022])
        >>> b = torch.randn(4, 1)
        >>> b
        tensor([[ 0.3743],
                [-1.7724],
                [-0.5811],
                [-0.8017]])
        >>> torch.add(a, b, alpha=10)
        tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
                [-18.6971, -18.0736, -17.0994, -17.3216],
                [ -6.7845, -6.1610, -5.1868, -5.4090],
                [ -8.9902, -8.3667, -7.3925, -7.6147]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_AddOp: writes into `out`, no result.
def ATen_AddInplaceOp: ATen_RefTensorOp<"add.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyScalar:$alpha,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise quadrant-aware arctangent of input/other (torch.atan2).
def ATen_Atan2Op: ATen_ImmutableTensorOp<"atan2", [NoSideEffect]> {
  let summary = "atan2(input, other, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}`
    with consideration of the quadrant. Returns a new tensor with the signed angles
    in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})`
    and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second
    parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first
    parameter, is the y-coordinate.)

    The shapes of ``input`` and ``other`` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    Args:
      input (Tensor): the first input tensor
      other (Tensor): the second input tensor
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
        >>> torch.atan2(a, torch.randn(4))
        tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_Atan2Op: writes into `out`, no result.
def ATen_Atan2InplaceOp: ATen_RefTensorOp<"atan2.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise/broadcasting division (torch.div); description retains the
// upstream deprecation warning about integer division semantics.
def ATen_DivOp: ATen_ImmutableTensorOp<"div", [NoSideEffect]> {
  let summary = "div(input, other, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Divides each element of the input ``input`` with the scalar ``other`` and
    returns a new resulting tensor.

    .. warning::
        Integer division using div is no longer supported, and in a future release
        div will perform true division as in Python 3. Use :func:`torch.true_divide`
        or :func:`torch.floor_divide` (// in Python), instead.

    .. math::
        \text{out}_i = \frac{\text{input}_i}{\text{other}}

    If the :class:`torch.dtype` of ``input`` and ``other`` differ, the
    :class:`torch.dtype` of the result tensor is determined following rules
    described in the type promotion :ref:`documentation <type-promotion-doc>`. If
    ``out`` is specified, the result must be :ref:`castable <type-promotion-doc>`
    to the :class:`torch.dtype` of the specified output tensor. Integral division
    by zero leads to undefined behavior.

    Args:
      input (Tensor): the input tensor.
      other (Number): the number to be divided to each element of ``input``

    Keyword args:
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
        >>> torch.div(a, 0.5)
        tensor([ 0.7620, 2.5548, -0.5944, -0.7439, 0.9275])

    .. function:: div(input, other, out=None) -> Tensor

    Each element of the tensor ``input`` is divided by each element of the tensor
    ``other``. The resulting tensor is returned.

    .. math::
        \text{out}_i = \frac{\text{input}_i}{\text{other}_i}

    The shapes of ``input`` and ``other`` must be :ref:`broadcastable
    <broadcasting-semantics>`. If the :class:`torch.dtype` of ``input`` and
    ``other`` differ, the :class:`torch.dtype` of the result tensor is determined
    following rules described in the type promotion :ref:`documentation
    <type-promotion-doc>`. If ``out`` is specified, the result must be
    :ref:`castable <type-promotion-doc>` to the :class:`torch.dtype` of the
    specified output tensor. Integral division by zero leads to undefined behavior.

    Args:
      input (Tensor): the numerator tensor
      other (Tensor): the denominator tensor

    Keyword args:
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
                [ 0.1815, -1.0111, 0.9805, -1.5923],
                [ 0.1062, 1.4581, 0.7759, -1.2344],
                [-0.1830, -0.0313, 1.1908, -1.4757]])
        >>> b = torch.randn(4)
        >>> b
        tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
        >>> torch.div(a, b)
        tensor([[-0.4620, -6.6051, 0.5676, 1.2637],
                [ 0.2260, -3.4507, -1.2086, 6.8988],
                [ 0.1322, 4.9764, -0.9564, 5.3480],
                [-0.2278, -0.1068, -1.4678, 6.3936]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_DivOp: writes into `out`, no result.
def ATen_DivInplaceOp: ATen_RefTensorOp<"div.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise floor division (torch.floor_divide).
def ATen_FloorDivideOp: ATen_ImmutableTensorOp<"floor_divide", [NoSideEffect]> {
  let summary = "floor_divide(input, other, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Return the division of the inputs rounded down to the nearest integer. See :func:`torch.div`
    for type promotion and broadcasting rules.

    .. math::
        \text{{out}}_i = \left\lfloor \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right\rfloor

    Args:
      input (Tensor): the numerator tensor
      other (Tensor or Scalar): the denominator

    Keyword args:
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([4.0, 3.0])
        >>> b = torch.tensor([2.0, 2.0])
        >>> torch.floor_divide(a, b)
        tensor([2.0, 1.0])
        >>> torch.floor_divide(a, 1.4)
        tensor([2.0, 2.0])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_FloorDivideOp: writes into `out`.
def ATen_FloorDivideInplaceOp: ATen_RefTensorOp<"floor_divide.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise/broadcasting multiplication (torch.mul).
// Fix: the generated description leaked un-expanded docstring template
// placeholders `{input}` and `{out}`; they are replaced with the expanded
// argument text used by every sibling op in this file.
def ATen_MulOp: ATen_ImmutableTensorOp<"mul", [NoSideEffect]> {
  let summary = "mul(input, other, out=None)";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Multiplies each element of the input :attr:`input` with the scalar
    :attr:`other` and returns a new resulting tensor.

    .. math::
        \text{out}_i = \text{other} \times \text{input}_i

    If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`other`
    should be a real number, otherwise it should be an integer

    Args:
      input (Tensor): the input tensor.
      value (Number): the number to be multiplied to each element of :attr:`input`
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(3)
        >>> a
        tensor([ 0.2015, -0.4255, 2.6087])
        >>> torch.mul(a, 100)
        tensor([ 20.1494, -42.5491, 260.8663])

    .. function:: mul(input, other, out=None)

    Each element of the tensor :attr:`input` is multiplied by the corresponding
    element of the Tensor :attr:`other`. The resulting tensor is returned.

    The shapes of :attr:`input` and :attr:`other` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    .. math::
        \text{out}_i = \text{input}_i \times \text{other}_i

    Args:
      input (Tensor): the first multiplicand tensor
      other (Tensor): the second multiplicand tensor
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 1)
        >>> a
        tensor([[ 1.1207],
                [-0.3137],
                [ 0.0700],
                [ 0.8378]])
        >>> b = torch.randn(1, 4)
        >>> b
        tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
        >>> torch.mul(a, b)
        tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
                [-0.1614, -0.0382, 0.1645, -0.7021],
                [ 0.0360, 0.0085, -0.0367, 0.1567],
                [ 0.4312, 0.1019, -0.4394, 1.8753]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_MulOp: writes into `out`, no result.
def ATen_MulInplaceOp: ATen_RefTensorOp<"mul.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise remainder with sign following the divisor (torch.remainder).
def ATen_RemainderOp: ATen_ImmutableTensorOp<"remainder", [NoSideEffect]> {
  let summary = "remainder(input, other, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the element-wise remainder of division.

    The dividend and divisor may contain both for integer and floating point
    numbers. The remainder has the same sign as the divisor :attr:`other`.

    When :attr:`other` is a tensor, the shapes of :attr:`input` and
    :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

    Args:
      input (Tensor): the dividend
      other (Tensor or float): the divisor that may be either a number or a
        Tensor of the same shape as the dividend
      out (Tensor, optional): the output tensor.

    Example::

        >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
        tensor([ 1., 0., 1., 1., 0., 1.])
        >>> torch.remainder(torch.tensor([1., 2, 3, 4, 5]), 1.5)
        tensor([ 1.0000, 0.5000, 0.0000, 1.0000, 0.5000])

    .. seealso::

        :func:`torch.fmod`, which computes the element-wise remainder of
        division equivalently to the C library function ``fmod()``.
  }];
}
|
|
|
|
// Destination-passing variant of ATen_RemainderOp: writes into `out`.
def ATen_RemainderInplaceOp: ATen_RefTensorOp<"remainder.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Always-floating-point division (torch.true_divide); note the operand
// names here are `dividend`/`divisor`, unlike the `input`/`other` siblings.
def ATen_TrueDivideOp: ATen_ImmutableTensorOp<"true_divide", [NoSideEffect]> {
  let summary = "true_divide(dividend, divisor, *, out) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$dividend,
    ATen_AnyTensor:$divisor
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Performs "true division" that always computes the division
    in floating point. Analogous to division in Python 3 and equivalent to
    :func:`torch.div` except when both inputs have bool or integer scalar types,
    in which case they are cast to the default (floating) scalar type before the division.

    .. math::
        \text{out}_i = \frac{\text{dividend}_i}{\text{divisor}}

    Args:
      dividend (Tensor): the dividend
      divisor (Tensor or Scalar): the divisor

    Keyword args:
      out (Tensor, optional): the output tensor.

    Example::

        >>> dividend = torch.tensor([5, 3], dtype=torch.int)
        >>> divisor = torch.tensor([3, 2], dtype=torch.int)
        >>> torch.true_divide(dividend, divisor)
        tensor([1.6667, 1.5000])
        >>> torch.true_divide(dividend, 2)
        tensor([2.5000, 1.5000])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_TrueDivideOp: writes into `out`.
def ATen_TrueDivideInplaceOp: ATen_RefTensorOp<"true_divide.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$dividend,
    ATen_AnyTensor:$divisor,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Cumulative sum along dimension `dim` (torch.cumsum). The `dtype` keyword
// from the PyTorch signature is not modeled as an operand here.
def ATen_CumsumOp: ATen_ImmutableTensorOp<"cumsum", [NoSideEffect]> {
  let summary = "cumsum(input, dim, out=None, dtype=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyScalar:$dim
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns the cumulative sum of elements of :attr:`input` in the dimension
    :attr:`dim`.

    For example, if :attr:`input` is a vector of size N, the result will also be
    a vector of size N, with elements.

    .. math::
        y_i = x_1 + x_2 + x_3 + \dots + x_i

    Args:
      input (Tensor): the input tensor.
      dim (int): the dimension to do the operation over
      dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is casted to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
                0.1850, -1.1571, -0.4243])
        >>> torch.cumsum(a, dim=0)
        tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
                -1.8209, -2.9780, -3.4022])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_CumsumOp: writes into `out`.
def ATen_CumsumInplaceOp: ATen_RefTensorOp<"cumsum.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyScalar:$dim,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Mean reduction over the dimensions in `dim`, optionally keeping reduced
// dims (torch.mean).
// NOTE(review): the summary shows the reduce-all overload `mean(input)`,
// but this record declares `dim` and `keep_dim` operands; the second
// `.. function::` section documents that overload — confirm with the generator.
def ATen_MeanOp: ATen_ImmutableTensorOp<"mean", [NoSideEffect]> {
  let summary = "mean(input) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$dim,
    ATen_BoolScalar:$keep_dim
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns the mean value of all elements in the :attr:`input` tensor.

    Args:
      input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.2294, -0.5481, 1.3288]])
        >>> torch.mean(a)
        tensor(0.3367)

    .. function:: mean(input, dim, keepdim=False, out=None) -> Tensor

    Returns the mean value of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
      input (Tensor): the input tensor.
      dim (int or tuple of ints): the dimension or dimensions to reduce.
      keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
      out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
                [-0.9644, 1.0131, -0.6549, -1.4279],
                [-0.2951, -1.3350, -0.7694, 0.5600],
                [ 1.0842, -0.9580, 0.3623, 0.2343]])
        >>> torch.mean(a, 1)
        tensor([-0.0163, -0.5085, -0.4599, 0.1807])
        >>> torch.mean(a, 1, True)
        tensor([[-0.0163],
                [-0.5085],
                [-0.4599],
                [ 0.1807]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_MeanOp: writes into `out`.
def ATen_MeanInplaceOp: ATen_RefTensorOp<"mean.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$dim,
    ATen_BoolScalar:$keep_dim,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Sum reduction over the dimensions in `dim`, optionally keeping reduced
// dims (torch.sum). As with ATen_MeanOp, the summary shows the reduce-all
// overload while `dim`/`keep_dim` operands are declared.
def ATen_SumOp: ATen_ImmutableTensorOp<"sum", [NoSideEffect]> {
  let summary = "sum(input, dtype=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$dim,
    ATen_BoolScalar:$keep_dim
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns the sum of all elements in the :attr:`input` tensor.

    Args:
      input (Tensor): the input tensor.
      dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is casted to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.1133, -0.9567, 0.2958]])
        >>> torch.sum(a)
        tensor(-0.5475)

    .. function:: sum(input, dim, keepdim=False, dtype=None) -> Tensor

    Returns the sum of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
      input (Tensor): the input tensor.
      dim (int or tuple of ints): the dimension or dimensions to reduce.
      keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
      dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is casted to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
                [-0.2993, 0.9138, 0.9337, -1.6864],
                [ 0.1132, 0.7892, -0.1003, 0.5688],
                [ 0.3637, -0.9906, -0.4752, -1.5197]])
        >>> torch.sum(a, 1)
        tensor([-0.4598, -0.1381, 1.3708, -2.6217])
        >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
        >>> torch.sum(b, (2, 1))
        tensor([ 435., 1335., 2235., 3135.])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_SumOp: writes into `out`.
def ATen_SumInplaceOp: ATen_RefTensorOp<"sum.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$dim,
    ATen_BoolScalar:$keep_dim,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Index-based gather along axis `dim` (torch.gather); output has the same
// shape as `index`.
def ATen_GatherOp: ATen_ImmutableTensorOp<"gather", [NoSideEffect]> {
  let summary = "gather(input, dim, index, out=None, sparse_grad=False) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntScalar:$dim,
    ATen_AnyTensor:$index,
    ATen_BoolScalar:$sparse_grad
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Gathers values along an axis specified by `dim`.

    For a 3-D tensor the output is specified by::

        out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
        out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
        out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2

    If :attr:`input` is an n-dimensional tensor with size
    :math:`(x_0, x_1..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`
    and ``dim = i``, then :attr:`index` must be an :math:`n`-dimensional tensor with
    size :math:`(x_0, x_1, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})` where :math:`y \geq 1`
    and :attr:`out` will have the same size as :attr:`index`.

    Args:
      input (Tensor): the source tensor
      dim (int): the axis along which to index
      index (LongTensor): the indices of elements to gather
      out (Tensor, optional): the destination tensor
      sparse_grad(bool,optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.

    Example::

        >>> t = torch.tensor([[1,2],[3,4]])
        >>> torch.gather(t, 1, torch.tensor([[0,0],[1,0]]))
        tensor([[ 1, 1],
                [ 4, 3]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_GatherOp: writes into `out`.
def ATen_GatherInplaceOp: ATen_RefTensorOp<"gather.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntScalar:$dim,
    ATen_AnyTensor:$index,
    ATen_BoolScalar:$sparse_grad,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Full dimension reversal, i.e. the `Tensor.T` property (numpy-style
// transpose over all dims); summary text is taken verbatim from the
// upstream docstring.
def ATen_NumpyTOp: ATen_ImmutableTensorOp<"numpy_T", [NoSideEffect]> {
  let summary = "Is this Tensor with its dimensions reversed.";
  let arguments = (ins
    ATen_AnyTensor:$input
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    If ``n`` is the number of dimensions in ``x``,
    ``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.
  }];
}
|
|
|
|
// Fused matmul-plus-add: out = beta * input + alpha * (mat1 @ mat2)
// (torch.addmm).
def ATen_AddmmOp: ATen_ImmutableTensorOp<"addmm", [NoSideEffect]> {
  let summary = "addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$mat1,
    ATen_AnyTensor:$mat2,
    ATen_AnyScalar:$beta,
    ATen_AnyScalar:$alpha
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
    The matrix :attr:`input` is added to the final result.

    If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
    :math:`(m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
    and :attr:`out` will be a :math:`(n \times p)` tensor.

    :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
    :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i)

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    Args:
      input (Tensor): matrix to be added
      mat1 (Tensor): the first matrix to be multiplied
      mat2 (Tensor): the second matrix to be multiplied
      beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
      alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
      out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2, 3)
        >>> mat1 = torch.randn(2, 3)
        >>> mat2 = torch.randn(3, 3)
        >>> torch.addmm(M, mat1, mat2)
        tensor([[-4.8716, 1.4671, -1.3746],
                [ 0.7573, -3.9555, -2.8681]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_AddmmOp: writes into `out`.
def ATen_AddmmInplaceOp: ATen_RefTensorOp<"addmm.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$mat1,
    ATen_AnyTensor:$mat2,
    ATen_AnyScalar:$beta,
    ATen_AnyScalar:$alpha,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// 1-D inner product of two tensors (torch.dot); no broadcasting.
def ATen_DotOp: ATen_ImmutableTensorOp<"dot", [NoSideEffect]> {
  let summary = "dot(input, tensor) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$tensor
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Computes the dot product (inner product) of two tensors.

    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

    Example::

        >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
        tensor(7)
  }];
}
|
|
|
|
// General matrix product with numpy-style rank dispatch and batch
// broadcasting (torch.matmul).
def ATen_MatmulOp: ATen_ImmutableTensorOp<"matmul", [NoSideEffect]> {
  let summary = "matmul(input, other, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Matrix product of two tensors.

    The behavior depends on the dimensionality of the tensors as follows:

    - If both tensors are 1-dimensional, the dot product (scalar) is returned.
    - If both arguments are 2-dimensional, the matrix-matrix product is returned.
    - If the first argument is 1-dimensional and the second argument is 2-dimensional,
      a 1 is prepended to its dimension for the purpose of the matrix multiply.
      After the matrix multiply, the prepended dimension is removed.
    - If the first argument is 2-dimensional and the second argument is 1-dimensional,
      the matrix-vector product is returned.
    - If both arguments are at least 1-dimensional and at least one argument is
      N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
      argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
      batched matrix multiply and removed after. If the second argument is 1-dimensional, a
      1 is appended to its dimension for the purpose of the batched matrix multiple and removed after.
      The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
      must be broadcastable). For example, if :attr:`input` is a
      :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
      tensor, :attr:`out` will be an :math:`(j \times k \times n \times p)` tensor.

    .. note::

        The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.

    Arguments:
      input (Tensor): the first tensor to be multiplied
      other (Tensor): the second tensor to be multiplied
      out (Tensor, optional): the output tensor.

    Example::

        >>> # vector x vector
        >>> tensor1 = torch.randn(3)
        >>> tensor2 = torch.randn(3)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([])
        >>> # matrix x vector
        >>> tensor1 = torch.randn(3, 4)
        >>> tensor2 = torch.randn(4)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([3])
        >>> # batched matrix x broadcasted vector
        >>> tensor1 = torch.randn(10, 3, 4)
        >>> tensor2 = torch.randn(4)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([10, 3])
        >>> # batched matrix x batched matrix
        >>> tensor1 = torch.randn(10, 3, 4)
        >>> tensor2 = torch.randn(10, 4, 5)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([10, 3, 5])
        >>> # batched matrix x broadcasted matrix
        >>> tensor1 = torch.randn(10, 3, 4)
        >>> tensor2 = torch.randn(4, 5)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([10, 3, 5])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_MatmulOp: writes into `out`.
def ATen_MatmulInplaceOp: ATen_RefTensorOp<"matmul.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$other,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Strict 2-D matrix-matrix product (torch.mm); no broadcasting, unlike
// ATen_MatmulOp.
def ATen_MmOp: ATen_ImmutableTensorOp<"mm", [NoSideEffect]> {
  let summary = "mm(input, mat2, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$mat2
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.

    If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
    :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.

    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
              For broadcasting matrix products, see :func:`torch.matmul`.

    Args:
      input (Tensor): the first matrix to be multiplied
      mat2 (Tensor): the second matrix to be multiplied
      out (Tensor, optional): the output tensor.

    Example::

        >>> mat1 = torch.randn(2, 3)
        >>> mat2 = torch.randn(3, 3)
        >>> torch.mm(mat1, mat2)
        tensor([[ 0.4851, 0.5037, -0.3633],
                [-0.0760, -3.6705, 2.4784]])
  }];
}
|
|
|
|
// Destination-passing variant of ATen_MmOp: writes into `out`.
def ATen_MmInplaceOp: ATen_RefTensorOp<"mm.inplace", []> {
  let summary = "See non-inplace op variant.";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyTensor:$mat2,
    ATen_AnyRefTensor:$out
  );
  let results = (outs);
}
|
|
|
|
// Elementwise clamp to [min_val, max_val] (torch.nn.functional.hardtanh).
// The `inplace` flag from the Python signature is not modeled; the in-place
// form would be a separate ref-tensor op.
def ATen_HardtanhOp: ATen_ImmutableTensorOp<"hardtanh", [NoSideEffect]> {
  let summary = " hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_FloatScalar:$min_val,
    ATen_FloatScalar:$max_val
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more
    details.
  }];
}
|
|
|
|
// 1-D average pooling (torch.nn.functional.avg_pool1d); window geometry is
// carried as integer-list operands.
def ATen_AvgPool1dOp: ATen_ImmutableTensorOp<"avg_pool1d", [NoSideEffect]> {
  let summary = "avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$kernel_size,
    ATen_IntList:$stride,
    ATen_IntList:$padding,
    ATen_BoolScalar:$ceil_mode,
    ATen_BoolScalar:$count_include_pad
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Applies a 1D average pooling over an input signal composed of several
    input planes.

    See :class:`~torch.nn.AvgPool1d` for details and output shape.

    Args:
      input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
      kernel_size: the size of the window. Can be a single number or a
        tuple `(kW,)`
      stride: the stride of the window. Can be a single number or a tuple
        `(sW,)`. Default: :attr:`kernel_size`
      padding: implicit zero paddings on both sides of the input. Can be a
        single number or a tuple `(padW,)`. Default: 0
      ceil_mode: when True, will use `ceil` instead of `floor` to compute the
        output shape. Default: ``False``
      count_include_pad: when True, will include the zero-padding in the
        averaging calculation. Default: ``True``

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
        >>> F.avg_pool1d(input, kernel_size=3, stride=2)
        tensor([[[ 2., 4., 6.]]])
  }];
}
|
|
|
|
def ATen_MaxPool1dOp: ATen_ImmutableTensorOp<"max_pool1d", [NoSideEffect]> {
  let summary = "Applies a 1D max pooling over an input signal composed of several input";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$kernel_size,
    ATen_IntList:$stride,
    ATen_IntList:$padding,
    ATen_IntList:$dilation,
    ATen_BoolScalar:$ceil_mode
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{

    See :class:`~torch.nn.MaxPool1d` for details.
  }];
}

def ATen_AsStridedOp: ATen_ImmutableTensorOp<"as_strided", [NoSideEffect]> {
  let summary = "as_strided(input, size, stride, storage_offset=0) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$size,
    ATen_IntList:$stride,
    ATen_IntScalar:$storage_offset
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Create a view of an existing `torch.Tensor` :attr:`input` with specified
    :attr:`size`, :attr:`stride` and :attr:`storage_offset`.

    .. warning::
        More than one element of a created tensor may refer to a single memory
        location. As a result, in-place operations (especially ones that are
        vectorized) may result in incorrect behavior. If you need to write to
        the tensors, please clone them first.

        Many PyTorch functions, which return a view of a tensor, are internally
        implemented with this function. Those functions, like
        :meth:`torch.Tensor.expand`, are easier to read and are therefore more
        advisable to use.


    Args:
        input (Tensor): the input tensor.
        size (tuple or ints): the shape of the output tensor
        stride (tuple or ints): the stride of the output tensor
        storage_offset (int, optional): the offset in the underlying storage of the output tensor

    Example::

        >>> x = torch.randn(3, 3)
        >>> x
        tensor([[ 0.9039,  0.6291,  1.0795],
                [ 0.1586,  2.1939, -0.4900],
                [-0.1909, -0.7503,  1.9355]])
        >>> t = torch.as_strided(x, (2, 2), (1, 2))
        >>> t
        tensor([[0.9039, 1.0795],
                [0.6291, 0.1586]])
        >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
        tensor([[0.6291, 0.1586],
                [1.0795, 2.1939]])

    MLIR Specific Notes
    -------------------
    In PyTorch proper, this op creates a view that may internally alias. And
    have explicit warnings about avoiding inplace updates on such a
    view (without first cloning). For the moment, this op is formulated with
    value semantics that imply a copy instead of a view, and it is expected
    that any sharing can be recovered later by the compiler. The warning
    about not in-place updating of such a result should be treated as UB
    when compiled.
  }];
}

def ATen_ExpandOp: ATen_ImmutableTensorOp<"expand", [NoSideEffect]> {
  let summary = "expand(*sizes) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_AnyScalar:$sizes,
    ATen_BoolScalar:$implicit
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
    to a larger size.

    Passing -1 as the size for a dimension means not changing the size of
    that dimension.

    Tensor can be also expanded to a larger number of dimensions, and the
    new ones will be appended at the front. For the new dimensions, the
    size cannot be set to -1.

    Expanding a tensor does not allocate new memory, but only creates a
    new view on the existing tensor where a dimension of size one is
    expanded to a larger size by setting the ``stride`` to 0. Any dimension
    of size 1 can be expanded to an arbitrary value without allocating new
    memory.

    Args:
        *sizes (torch.Size or int...): the desired expanded size

    .. warning::

        More than one element of an expanded tensor may refer to a single
        memory location. As a result, in-place operations (especially ones that
        are vectorized) may result in incorrect behavior. If you need to write
        to the tensors, please clone them first.

    Example::

        >>> x = torch.tensor([[1], [2], [3]])
        >>> x.size()
        torch.Size([3, 1])
        >>> x.expand(3, 4)
        tensor([[ 1,  1,  1,  1],
                [ 2,  2,  2,  2],
                [ 3,  3,  3,  3]])
        >>> x.expand(-1, 4)   # -1 means not changing the size of that dimension
        tensor([[ 1,  1,  1,  1],
                [ 2,  2,  2,  2],
                [ 3,  3,  3,  3]])

    MLIR Specific Notes
    -------------------
    See notes for the 'as_strided' op.
  }];
}

def ATen_SqueezeOp: ATen_ImmutableTensorOp<"squeeze", [NoSideEffect]> {
  let summary = "squeeze(input, dim=None, out=None) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntScalar:$dim
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.

    For example, if `input` is of shape:
    :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
    will be of shape: :math:`(A \times B \times C \times D)`.

    When :attr:`dim` is given, a squeeze operation is done only in the given
    dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
    ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
    will squeeze the tensor to the shape :math:`(A \times B)`.

    .. note:: The returned tensor shares the storage with the input tensor,
              so changing the contents of one will change the contents of the other.

    .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
                 will also remove the batch dimension, which can lead to unexpected
                 errors.

    Args:
        input (Tensor): the input tensor.
        dim (int, optional): if given, the input will be squeezed only in
              this dimension
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.zeros(2, 1, 2, 1, 2)
        >>> x.size()
        torch.Size([2, 1, 2, 1, 2])
        >>> y = torch.squeeze(x)
        >>> y.size()
        torch.Size([2, 2, 2])
        >>> y = torch.squeeze(x, 0)
        >>> y.size()
        torch.Size([2, 1, 2, 1, 2])
        >>> y = torch.squeeze(x, 1)
        >>> y.size()
        torch.Size([2, 2, 1, 2])

    MLIR Specific Notes
    -------------------
    See notes for the 'as_strided' op.
  }];
}

def ATen_ViewOp: ATen_ImmutableTensorOp<"view", [NoSideEffect]> {
  let summary = "view(*shape) -> Tensor";
  let arguments = (ins
    ATen_AnyTensor:$input,
    ATen_IntList:$size
  );
  let results = (outs
    ATen_AnyTensor:$result
  );
  let description = [{
    Returns a new tensor with the same data as the :attr:`self` tensor but of a
    different :attr:`shape`.

    The returned tensor shares the same data and must have the same number
    of elements, but may have a different size. For a tensor to be viewed, the new
    view size must be compatible with its original size and stride, i.e., each new
    view dimension must either be a subspace of an original dimension, or only span
    across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
    contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,

    .. math::

      \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]

    Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
    without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
    :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
    returns a view if the shapes are compatible, and copies (equivalent to calling
    :meth:`contiguous`) otherwise.

    Args:
        shape (torch.Size or int...): the desired size

    Example::

        >>> x = torch.randn(4, 4)
        >>> x.size()
        torch.Size([4, 4])
        >>> y = x.view(16)
        >>> y.size()
        torch.Size([16])
        >>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
        >>> z.size()
        torch.Size([2, 8])

        >>> a = torch.randn(1, 2, 3, 4)
        >>> a.size()
        torch.Size([1, 2, 3, 4])
        >>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
        >>> b.size()
        torch.Size([1, 3, 2, 4])
        >>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
        >>> c.size()
        torch.Size([1, 3, 2, 4])
        >>> torch.equal(b, c)
        False


    MLIR Specific Notes
    -------------------
    See notes for the 'as_strided' op.
  }];
}
