From 9618c2dbf76d6a4a06cdd17ba80e007154288ecc Mon Sep 17 00:00:00 2001
From: Stella Laurenzo
Date: Thu, 22 Oct 2020 14:13:26 -0700
Subject: [PATCH] NFC: Re-organize ATen directory structure and fix warnings.

* Still some more work to do on the Transforms tree to bring it in line
  with the others (will do that as I add things).
---
 .../pytorch/csrc/init_python_bindings.cpp     |    8 +-
 include/npcomp/Dialect/ATen/CMakeLists.txt    |   19 +-
 .../Dialect/ATen/GeneratedATenBuiltinOps.td   | 2501 -----------------
 .../Dialect/ATen/{ => IR}/ATenDialect.h       |   16 +-
 include/npcomp/Dialect/ATen/IR/ATenDialect.td |   40 +
 .../Dialect/ATen/{ => IR}/ATenOpInterface.td  |    6 +-
 .../Dialect/ATen/{ => IR}/ATenOpInterfaces.h  |    8 +-
 .../ATen/{ => IR}/ATenOpStatisticsUtils.h     |   16 +-
 .../Dialect/ATen/{ATen.td => IR/ATenOps.td}   |   38 +-
 include/npcomp/Dialect/ATen/IR/CMakeLists.txt |   19 +
 .../{ATenOps.td => IR/GeneratedATenOps.td}    |    6 +-
 .../ATen/{ => Transforms}/ATenLayerNamePass.h |    0
 .../ATen/{ => Transforms}/ATenLoweringPass.h  |    0
 .../ATen/{ => Transforms}/ATenOpReport.h      |    2 +
 .../ATen/{ => Transforms}/ATenPasses.h        |    8 +-
 .../Dialect/ATen/{ => Transforms}/ATenToStd.h |    0
 .../ATen/{ => Transforms}/ATenToStd.td        |   12 +-
 .../Dialect/ATen/Transforms/CMakeLists.txt    |    3 +
 .../ATen/{ => Transforms}/LivenessReport.h    |    3 +
 .../{ => Transforms}/ReturnEliminationPass.h  |    0
 lib/Dialect/ATen/CMakeLists.txt               |   27 +-
 lib/Dialect/ATen/{ => IR}/ATenDialect.cpp     |   10 +-
 .../ATen/{ => IR}/ATenDialectOpStats.cpp      |    4 +-
 lib/Dialect/ATen/IR/CMakeLists.txt            |   17 +
 .../{ => Transforms}/ATenLayerNamePass.cpp    |    4 +-
 .../{ => Transforms}/ATenLoweringPass.cpp     |   48 +-
 .../ATen/{ => Transforms}/ATenOpReport.cpp    |    8 +-
 .../ATen/{ => Transforms}/ATenPasses.cpp      |    2 +-
 .../ATen/{ => Transforms}/ATenToStd.cpp       |    7 +-
 lib/Dialect/ATen/Transforms/CMakeLists.txt    |   12 +
 .../ATen/{ => Transforms}/LivenessReport.cpp  |   31 +-
 .../ReturnEliminationPass.cpp                 |    9 +-
 lib/InitAll.cpp                               |    4 +-
 .../LowerStructuralToMemref.cpp               |    2 -
 34 files changed, 166 insertions(+), 2724 deletions(-)
 delete mode 100644 include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td
 rename include/npcomp/Dialect/ATen/{ => IR}/ATenDialect.h (87%)
 create mode 100644 include/npcomp/Dialect/ATen/IR/ATenDialect.td
 rename include/npcomp/Dialect/ATen/{ => IR}/ATenOpInterface.td (94%)
 rename include/npcomp/Dialect/ATen/{ => IR}/ATenOpInterfaces.h (70%)
 rename include/npcomp/Dialect/ATen/{ => IR}/ATenOpStatisticsUtils.h (95%)
 rename include/npcomp/Dialect/ATen/{ATen.td => IR/ATenOps.td} (77%)
 create mode 100644 include/npcomp/Dialect/ATen/IR/CMakeLists.txt
 rename include/npcomp/Dialect/ATen/{ATenOps.td => IR/GeneratedATenOps.td} (99%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ATenLayerNamePass.h (100%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ATenLoweringPass.h (100%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ATenOpReport.h (96%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ATenPasses.h (74%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ATenToStd.h (100%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ATenToStd.td (87%)
 create mode 100644 include/npcomp/Dialect/ATen/Transforms/CMakeLists.txt
 rename include/npcomp/Dialect/ATen/{ => Transforms}/LivenessReport.h (94%)
 rename include/npcomp/Dialect/ATen/{ => Transforms}/ReturnEliminationPass.h (100%)
 rename lib/Dialect/ATen/{ => IR}/ATenDialect.cpp (91%)
 rename lib/Dialect/ATen/{ => IR}/ATenDialectOpStats.cpp (99%)
 create mode 100644 lib/Dialect/ATen/IR/CMakeLists.txt
 rename lib/Dialect/ATen/{ => Transforms}/ATenLayerNamePass.cpp (96%)
 rename lib/Dialect/ATen/{ => Transforms}/ATenLoweringPass.cpp (95%)
 rename lib/Dialect/ATen/{ => Transforms}/ATenOpReport.cpp (95%)
 rename lib/Dialect/ATen/{ => Transforms}/ATenPasses.cpp (93%)
 rename lib/Dialect/ATen/{ => Transforms}/ATenToStd.cpp (81%)
 create mode 100644 lib/Dialect/ATen/Transforms/CMakeLists.txt
 rename lib/Dialect/ATen/{ => Transforms}/LivenessReport.cpp (91%)
 rename lib/Dialect/ATen/{ => Transforms}/ReturnEliminationPass.cpp (94%)

diff --git a/frontends/pytorch/csrc/init_python_bindings.cpp b/frontends/pytorch/csrc/init_python_bindings.cpp
index 05d6abc20..fd47003b6 100644
--- a/frontends/pytorch/csrc/init_python_bindings.cpp
+++ b/frontends/pytorch/csrc/init_python_bindings.cpp
@@ -25,10 +25,10 @@
 #include "mlir/Pass/PassManager.h"
 #include "mlir/Transforms/Passes.h"
 
-#include "npcomp/Dialect/ATen/ATenDialect.h"
-#include "npcomp/Dialect/ATen/ATenOpReport.h"
-#include "npcomp/Dialect/ATen/ATenPasses.h"
-#include "npcomp/Dialect/ATen/LivenessReport.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenOpReport.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenPasses.h"
+#include "npcomp/Dialect/ATen/Transforms/LivenessReport.h"
 
 #include "init_python_bindings.h"
 
diff --git a/include/npcomp/Dialect/ATen/CMakeLists.txt b/include/npcomp/Dialect/ATen/CMakeLists.txt
index 9a209c2ac..9f57627c3 100644
--- a/include/npcomp/Dialect/ATen/CMakeLists.txt
+++ b/include/npcomp/Dialect/ATen/CMakeLists.txt
@@ -1,17 +1,2 @@
-include_directories(${PROJECT_SOURCE_DIR}/dialect)
-
-add_mlir_dialect(ATen aten)
-set(LLVM_TARGET_DEFINITIONS ATen.td)
-mlir_tablegen(ATenEnums.h.inc -gen-enum-decls)
-mlir_tablegen(ATenEnums.cpp.inc -gen-enum-defs)
-add_public_tablegen_target(MLIRATenEnumsIncGen)
-
-set(LLVM_TARGET_DEFINITIONS ATenOpInterface.td)
-mlir_tablegen(ATenOpInterfaces.h.inc -gen-op-interface-decls)
-mlir_tablegen(ATenOpInterfaces.cpp.inc -gen-op-interface-defs)
-add_public_tablegen_target(MLIRATenOpInterfacesIncGen)
-add_dependencies(mlir-generic-headers MLIRATenOpInterfacesIncGen)
-
-set(LLVM_TARGET_DEFINITIONS ATenToStd.td)
-mlir_tablegen(ATenToStd.cpp.inc -gen-rewriters)
-add_public_tablegen_target(MLIRATenToStdIncGen)
+add_subdirectory(IR)
+add_subdirectory(Transforms)
diff --git a/include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td b/include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td
deleted file mode 100644
index bcffe905d..000000000
--- a/include/npcomp/Dialect/ATen/GeneratedATenBuiltinOps.td
+++ /dev/null
@@ -1,2501 +0,0 @@
-//===-------------------------------------------------------*- tablegen -*-===//
-//
-// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-// Operation summaries and descriptions were systematically derived from public
-// API docstrings and are licensed accordingly:
-//   https://github.com/pytorch/pytorch/blob/master/LICENSE
-//===----------------------------------------------------------------------===//
-// This file is automatically generated. Please do not edit.
-// Generated via:
-//   python -m npcomp.torch.opdefs.generate_ods
-//===----------------------------------------------------------------------===//
-
-def ATen_AbsOp: ATen_ImmutableTensorOp<"abs", [NoSideEffect]> {
-  let summary = "abs(input, *, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the absolute value of each element in :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = |\text{input}_{i}|
-
-    Args:
-        input (Tensor): the input tensor.
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.abs(torch.tensor([-1, -2, 3]))
-        tensor([ 1, 2, 3])
-  }];
-}
-
-def ATen_AbsInplaceOp: ATen_RefTensorOp<"abs.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_AcosOp: ATen_ImmutableTensorOp<"acos", [NoSideEffect]> {
-  let summary = "acos(input, *, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the inverse cosine of each element in :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \cos^{-1}(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
-        >>> torch.acos(a)
-        tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
-  }];
-}
-
-def ATen_AcosInplaceOp: ATen_RefTensorOp<"acos.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_AngleOp: ATen_ImmutableTensorOp<"angle", [NoSideEffect]> {
-  let summary = "angle(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
-
-    .. math::
-        \text{out}_{i} = angle(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
-        tensor([ 135., 135, -45])
-  }];
-}
-
-def ATen_AngleInplaceOp: ATen_RefTensorOp<"angle.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_AsinOp: ATen_ImmutableTensorOp<"asin", [NoSideEffect]> {
-  let summary = "asin(input, *, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the arcsine of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \sin^{-1}(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.5962, 1.4985, -0.4396, 1.4525])
-        >>> torch.asin(a)
-        tensor([-0.6387, nan, -0.4552, nan])
-  }];
-}
-
-def ATen_AsinInplaceOp: ATen_RefTensorOp<"asin.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_AtanOp: ATen_ImmutableTensorOp<"atan", [NoSideEffect]> {
-  let summary = "atan(input, *, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the arctangent of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \tan^{-1}(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
-        >>> torch.atan(a)
-        tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
-  }];
-}
-
-def ATen_AtanInplaceOp: ATen_RefTensorOp<"atan.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_CeilOp: ATen_ImmutableTensorOp<"ceil", [NoSideEffect]> {
-  let summary = "ceil(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the ceil of the elements of :attr:`input`,
-    the smallest integer greater than or equal to each element.
-
-    .. math::
-        \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil = \left\lfloor \text{input}_{i} \right\rfloor + 1
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.6341, -1.4208, -1.0900, 0.5826])
-        >>> torch.ceil(a)
-        tensor([-0., -1., -1., 1.])
-  }];
-}
-
-def ATen_CeilInplaceOp: ATen_RefTensorOp<"ceil.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ConjOp: ATen_ImmutableTensorOp<"conj", [NoSideEffect]> {
-  let summary = "conj(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the element-wise conjugate of the given :attr:`input` tensor.
-
-    .. math::
-        \text{out}_{i} = conj(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.conj(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
-        tensor([-1 - 1j, -2 - 2j, 3 + 3j])
-  }];
-}
-
-def ATen_ConjInplaceOp: ATen_RefTensorOp<"conj.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_CosOp: ATen_ImmutableTensorOp<"cos", [NoSideEffect]> {
-  let summary = "cos(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the cosine of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \cos(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
-        >>> torch.cos(a)
-        tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
-  }];
-}
-
-def ATen_CosInplaceOp: ATen_RefTensorOp<"cos.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_CoshOp: ATen_ImmutableTensorOp<"cosh", [NoSideEffect]> {
-  let summary = "cosh(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the hyperbolic cosine of the elements of
-    :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \cosh(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
-        >>> torch.cosh(a)
-        tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
-  }];
-}
-
-def ATen_CoshInplaceOp: ATen_RefTensorOp<"cosh.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_DigammaOp: ATen_ImmutableTensorOp<"digamma", [NoSideEffect]> {
-  let summary = "digamma(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the logarithmic derivative of the gamma function on `input`.
-
-    .. math::
-        \psi(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}
-
-    Args:
-        input (Tensor): the tensor to compute the digamma function on
-
-    Example::
-
-        >>> a = torch.tensor([1, 0.5])
-        >>> torch.digamma(a)
-        tensor([-0.5772, -1.9635])
-  }];
-}
-
-def ATen_DigammaInplaceOp: ATen_RefTensorOp<"digamma.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ErfOp: ATen_ImmutableTensorOp<"erf", [NoSideEffect]> {
-  let summary = "erf(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the error function of each element. The error function is defined as follows:
-
-    .. math::
-        \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.erf(torch.tensor([0, -1., 10.]))
-        tensor([ 0.0000, -0.8427, 1.0000])
-  }];
-}
-
-def ATen_ErfInplaceOp: ATen_RefTensorOp<"erf.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ErfcOp: ATen_ImmutableTensorOp<"erfc", [NoSideEffect]> {
-  let summary = "erfc(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the complementary error function of each element of :attr:`input`.
-    The complementary error function is defined as follows:
-
-    .. math::
-        \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.erfc(torch.tensor([0, -1., 10.]))
-        tensor([ 1.0000, 1.8427, 0.0000])
-  }];
-}
-
-def ATen_ErfcInplaceOp: ATen_RefTensorOp<"erfc.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ErfinvOp: ATen_ImmutableTensorOp<"erfinv", [NoSideEffect]> {
-  let summary = "erfinv(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the inverse error function of each element of :attr:`input`.
-    The inverse error function is defined in the range :math:`(-1, 1)` as:
-
-    .. math::
-        \mathrm{erfinv}(\mathrm{erf}(x)) = x
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.erfinv(torch.tensor([0, 0.5, -1.]))
-        tensor([ 0.0000, 0.4769, -inf])
-  }];
-}
-
-def ATen_ErfinvInplaceOp: ATen_RefTensorOp<"erfinv.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ExpOp: ATen_ImmutableTensorOp<"exp", [NoSideEffect]> {
-  let summary = "exp(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the exponential of the elements
-    of the input tensor :attr:`input`.
-
-    .. math::
-        y_{i} = e^{x_{i}}
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.exp(torch.tensor([0, math.log(2.)]))
-        tensor([ 1., 2.])
-  }];
-}
-
-def ATen_ExpInplaceOp: ATen_RefTensorOp<"exp.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_Expm1Op: ATen_ImmutableTensorOp<"expm1", [NoSideEffect]> {
-  let summary = "expm1(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the exponential of the elements minus 1
-    of :attr:`input`.
-
-    .. math::
-        y_{i} = e^{x_{i}} - 1
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.expm1(torch.tensor([0, math.log(2.)]))
-        tensor([ 0., 1.])
-  }];
-}
-
-def ATen_Expm1InplaceOp: ATen_RefTensorOp<"expm1.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_FloorOp: ATen_ImmutableTensorOp<"floor", [NoSideEffect]> {
-  let summary = "floor(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the floor of the elements of :attr:`input`,
-    the largest integer less than or equal to each element.
-
-    .. math::
-        \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.8166, 1.5308, -0.2530, -0.2091])
-        >>> torch.floor(a)
-        tensor([-1., 1., -1., -1.])
-  }];
-}
-
-def ATen_FloorInplaceOp: ATen_RefTensorOp<"floor.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_FracOp: ATen_ImmutableTensorOp<"frac", [NoSideEffect]> {
-  let summary = "frac(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the fractional portion of each element in :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
-
-    Example::
-
-        >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
-        tensor([ 0.0000, 0.5000, -0.2000])
-  }];
-}
-
-def ATen_FracInplaceOp: ATen_RefTensorOp<"frac.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_LgammaOp: ATen_ImmutableTensorOp<"lgamma", [NoSideEffect]> {
-  let summary = "lgamma(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the logarithm of the gamma function on :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \log \Gamma(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.arange(0.5, 2, 0.5)
-        >>> torch.lgamma(a)
-        tensor([ 0.5724, 0.0000, -0.1208])
-  }];
-}
-
-def ATen_LgammaInplaceOp: ATen_RefTensorOp<"lgamma.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_LogOp: ATen_ImmutableTensorOp<"log", [NoSideEffect]> {
-  let summary = "log(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the natural logarithm of the elements
-    of :attr:`input`.
-
-    .. math::
-        y_{i} = \log_{e} (x_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(5)
-        >>> a
-        tensor([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])
-        >>> torch.log(a)
-        tensor([ nan, nan, nan, nan, nan])
-  }];
-}
-
-def ATen_LogInplaceOp: ATen_RefTensorOp<"log.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_Log10Op: ATen_ImmutableTensorOp<"log10", [NoSideEffect]> {
-  let summary = "log10(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the logarithm to the base 10 of the elements
-    of :attr:`input`.
-
-    .. math::
-        y_{i} = \log_{10} (x_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.rand(5)
-        >>> a
-        tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
-
-
-        >>> torch.log10(a)
-        tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
-  }];
-}
-
-def ATen_Log10InplaceOp: ATen_RefTensorOp<"log10.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_Log1pOp: ATen_ImmutableTensorOp<"log1p", [NoSideEffect]> {
-  let summary = "log1p(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
-
-    .. math::
-        y_i = \log_{e} (x_i + 1)
-
-    .. note:: This function is more accurate than :func:`torch.log` for small
-        values of :attr:`input`
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(5)
-        >>> a
-        tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
-        >>> torch.log1p(a)
-        tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
-  }];
-}
-
-def ATen_Log1pInplaceOp: ATen_RefTensorOp<"log1p.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_Log2Op: ATen_ImmutableTensorOp<"log2", [NoSideEffect]> {
-  let summary = "log2(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the logarithm to the base 2 of the elements
-    of :attr:`input`.
-
-    .. math::
-        y_{i} = \log_{2} (x_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.rand(5)
-        >>> a
-        tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
-
-
-        >>> torch.log2(a)
-        tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
-  }];
-}
-
-def ATen_Log2InplaceOp: ATen_RefTensorOp<"log2.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_NegOp: ATen_ImmutableTensorOp<"neg", [NoSideEffect]> {
-  let summary = "neg(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the negative of the elements of :attr:`input`.
-
-    .. math::
-        \text{out} = -1 \times \text{input}
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(5)
-        >>> a
-        tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
-        >>> torch.neg(a)
-        tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
-  }];
-}
-
-def ATen_NegInplaceOp: ATen_RefTensorOp<"neg.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ReciprocalOp: ATen_ImmutableTensorOp<"reciprocal", [NoSideEffect]> {
-  let summary = "reciprocal(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the reciprocal of the elements of :attr:`input`
-
-    .. math::
-        \text{out}_{i} = \frac{1}{\text{input}_{i}}
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.4595, -2.1219, -1.4314, 0.7298])
-        >>> torch.reciprocal(a)
-        tensor([-2.1763, -0.4713, -0.6986, 1.3702])
-  }];
-}
-
-def ATen_ReciprocalInplaceOp: ATen_RefTensorOp<"reciprocal.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_RoundOp: ATen_ImmutableTensorOp<"round", [NoSideEffect]> {
-  let summary = "round(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with each of the elements of :attr:`input` rounded
-    to the closest integer.
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.9920, 0.6077, 0.9734, -1.0362])
-        >>> torch.round(a)
-        tensor([ 1., 1., 1., -1.])
-  }];
-}
-
-def ATen_RoundInplaceOp: ATen_RefTensorOp<"round.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_RsqrtOp: ATen_ImmutableTensorOp<"rsqrt", [NoSideEffect]> {
-  let summary = "rsqrt(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the reciprocal of the square-root of each of
-    the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.0370, 0.2970, 1.5420, -0.9105])
-        >>> torch.rsqrt(a)
-        tensor([ nan, 1.8351, 0.8053, nan])
-  }];
-}
-
-def ATen_RsqrtInplaceOp: ATen_RefTensorOp<"rsqrt.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_SigmoidOp: ATen_ImmutableTensorOp<"sigmoid", [NoSideEffect]> {
-  let summary = "sigmoid(input, *, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the sigmoid of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}
-
-    Args:
-        input (Tensor): the input tensor.
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
-        >>> torch.sigmoid(a)
-        tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
-  }];
-}
-
-def ATen_SigmoidInplaceOp: ATen_RefTensorOp<"sigmoid.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_SignOp: ATen_ImmutableTensorOp<"sign", [NoSideEffect]> {
-  let summary = "sign(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the signs of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
-        >>> a
-        tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
-        >>> torch.sign(a)
-        tensor([ 1., -1., 0., 1.])
-  }];
-}
-
-def ATen_SignInplaceOp: ATen_RefTensorOp<"sign.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_SinOp: ATen_ImmutableTensorOp<"sin", [NoSideEffect]> {
-  let summary = "sin(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the sine of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \sin(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.5461, 0.1347, -2.7266, -0.2746])
-        >>> torch.sin(a)
-        tensor([-0.5194, 0.1343, -0.4032, -0.2711])
-  }];
-}
-
-def ATen_SinInplaceOp: ATen_RefTensorOp<"sin.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_SinhOp: ATen_ImmutableTensorOp<"sinh", [NoSideEffect]> {
-  let summary = "sinh(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the hyperbolic sine of the elements of
-    :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \sinh(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
-        >>> torch.sinh(a)
-        tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
-  }];
-}
-
-def ATen_SinhInplaceOp: ATen_RefTensorOp<"sinh.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_SqrtOp: ATen_ImmutableTensorOp<"sqrt", [NoSideEffect]> {
-  let summary = "sqrt(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the square-root of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \sqrt{\text{input}_{i}}
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-2.0755, 1.0226, 0.0831, 0.4806])
-        >>> torch.sqrt(a)
-        tensor([ nan, 1.0112, 0.2883, 0.6933])
-  }];
-}
-
-def ATen_SqrtInplaceOp: ATen_RefTensorOp<"sqrt.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_TanOp: ATen_ImmutableTensorOp<"tan", [NoSideEffect]> {
-  let summary = "tan(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the tangent of the elements of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \tan(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-1.2027, -1.7687, 0.4412, -1.3856])
-        >>> torch.tan(a)
-        tensor([-2.5930, 4.9859, 0.4722, -5.3366])
-  }];
-}
-
-def ATen_TanInplaceOp: ATen_RefTensorOp<"tan.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_TanhOp: ATen_ImmutableTensorOp<"tanh", [NoSideEffect]> {
-  let summary = "tanh(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the hyperbolic tangent of the elements
-    of :attr:`input`.
-
-    .. math::
-        \text{out}_{i} = \tanh(\text{input}_{i})
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
-        >>> torch.tanh(a)
-        tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
-  }];
-}
-
-def ATen_TanhInplaceOp: ATen_RefTensorOp<"tanh.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_TruncOp: ATen_ImmutableTensorOp<"trunc", [NoSideEffect]> {
-  let summary = "trunc(input, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new tensor with the truncated integer values of
-    the elements of :attr:`input`.
-
-    Args:
-        input (Tensor): the input tensor.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
-        >>> torch.trunc(a)
-        tensor([ 3., 0., -0., -0.])
-  }];
-}
-
-def ATen_TruncInplaceOp: ATen_RefTensorOp<"trunc.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_ReluOp: ATen_ImmutableTensorOp<"relu", [NoSideEffect]> {
-  let summary = "";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-}
-
-def ATen_AddOp: ATen_ImmutableTensorOp<"add", [NoSideEffect]> {
-  let summary = "add(input, other, out=None)";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyScalar:$alpha
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Adds the scalar :attr:`other` to each element of the input :attr:`input`
-    and returns a new resulting tensor.
-
-    .. math::
-        \text{out} = \text{input} + \text{other}
-
-    If :attr:`input` is of type FloatTensor or DoubleTensor, :attr:`other` must be
-    a real number, otherwise it should be an integer.
-
-    Args:
-        input (Tensor): the input tensor.
-        value (Number): the number to be added to each element of :attr:`input`
-
-    Keyword arguments:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
-        >>> torch.add(a, 20)
-        tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
-
-    .. function:: add(input, other, *, alpha=1, out=None)
-
-    Each element of the tensor :attr:`other` is multiplied by the scalar
-    :attr:`alpha` and added to each element of the tensor :attr:`input`.
-    The resulting tensor is returned.
-
-    The shapes of :attr:`input` and :attr:`other` must be
-    :ref:`broadcastable `.
-
-    .. math::
-        \text{out} = \text{input} + \text{alpha} \times \text{other}
-
-    If :attr:`other` is of type FloatTensor or DoubleTensor, :attr:`alpha` must be
-    a real number, otherwise it should be an integer.
-
-    Args:
-        input (Tensor): the first input tensor
-        other (Tensor): the second input tensor
-        alpha (Number): the scalar multiplier for :attr:`other`
-
-    Keyword arguments:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([-0.9732, -0.3497, 0.6245, 0.4022])
-        >>> b = torch.randn(4, 1)
-        >>> b
-        tensor([[ 0.3743],
-                [-1.7724],
-                [-0.5811],
-                [-0.8017]])
-        >>> torch.add(a, b, alpha=10)
-        tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
-                [-18.6971, -18.0736, -17.0994, -17.3216],
-                [ -6.7845, -6.1610, -5.1868, -5.4090],
-                [ -8.9902, -8.3667, -7.3925, -7.6147]])
-  }];
-}
-
-def ATen_AddInplaceOp: ATen_RefTensorOp<"add.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyScalar:$alpha,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_Atan2Op: ATen_ImmutableTensorOp<"atan2", [NoSideEffect]> {
-  let summary = "atan2(input, other, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}`
-    with consideration of the quadrant. Returns a new tensor with the signed angles
-    in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})`
-    and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second
-    parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first
-    parameter, is the y-coordinate.)
-
-    The shapes of ``input`` and ``other`` must be
-    :ref:`broadcastable `.
-
-    Args:
-        input (Tensor): the first input tensor
-        other (Tensor): the second input tensor
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4)
-        >>> a
-        tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
-        >>> torch.atan2(a, torch.randn(4))
-        tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
-  }];
-}
-
-def ATen_Atan2InplaceOp: ATen_RefTensorOp<"atan2.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_DivOp: ATen_ImmutableTensorOp<"div", [NoSideEffect]> {
-  let summary = "div(input, other, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Divides each element of the input ``input`` with the scalar ``other`` and
-    returns a new resulting tensor.
-
-    .. warning::
-        Integer division using div is no longer supported, and in a future release
-        div will perform true division as in Python 3. Use :func:`torch.true_divide`
-        or :func:`torch.floor_divide` (// in Python), instead.
-
-    .. math::
-        \text{out}_i = \frac{\text{input}_i}{\text{other}}
-
-    If the :class:`torch.dtype` of ``input`` and ``other`` differ, the
-    :class:`torch.dtype` of the result tensor is determined following rules
-    described in the type promotion :ref:`documentation `. If
-    ``out`` is specified, the result must be :ref:`castable `
-    to the :class:`torch.dtype` of the specified output tensor. Integral division
-    by zero leads to undefined behavior.
-
-    Args:
-        input (Tensor): the input tensor.
-        other (Number): the number to be divided to each element of ``input``
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(5)
-        >>> a
-        tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
-        >>> torch.div(a, 0.5)
-        tensor([ 0.7620, 2.5548, -0.5944, -0.7439, 0.9275])
-
-    .. function:: div(input, other, out=None) -> Tensor
-
-    Each element of the tensor ``input`` is divided by each element of the tensor
-    ``other``. The resulting tensor is returned.
-
-    .. math::
-        \text{out}_i = \frac{\text{input}_i}{\text{other}_i}
-
-    The shapes of ``input`` and ``other`` must be :ref:`broadcastable
-    `. If the :class:`torch.dtype` of ``input`` and
-    ``other`` differ, the :class:`torch.dtype` of the result tensor is determined
-    following rules described in the type promotion :ref:`documentation
-    `. If ``out`` is specified, the result must be
-    :ref:`castable ` to the :class:`torch.dtype` of the
-    specified output tensor. Integral division by zero leads to undefined behavior.
-
-    Args:
-        input (Tensor): the numerator tensor
-        other (Tensor): the denominator tensor
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4, 4)
-        >>> a
-        tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
-                [ 0.1815, -1.0111, 0.9805, -1.5923],
-                [ 0.1062, 1.4581, 0.7759, -1.2344],
-                [-0.1830, -0.0313, 1.1908, -1.4757]])
-        >>> b = torch.randn(4)
-        >>> b
-        tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
-        >>> torch.div(a, b)
-        tensor([[-0.4620, -6.6051, 0.5676, 1.2637],
-                [ 0.2260, -3.4507, -1.2086, 6.8988],
-                [ 0.1322, 4.9764, -0.9564, 5.3480],
-                [-0.2278, -0.1068, -1.4678, 6.3936]])
-  }];
-}
-
-def ATen_DivInplaceOp: ATen_RefTensorOp<"div.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_FloorDivideOp: ATen_ImmutableTensorOp<"floor_divide", [NoSideEffect]> {
-  let summary = "floor_divide(input, other, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Return the division of the inputs rounded down to the nearest integer. See :func:`torch.div`
-    for type promotion and broadcasting rules.
-
-    .. math::
-        \text{{out}}_i = \left\lfloor \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right\rfloor
-
-
-    Args:
-        input (Tensor): the numerator tensor
-        other (Tensor or Scalar): the denominator
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.tensor([4.0, 3.0])
-        >>> b = torch.tensor([2.0, 2.0])
-        >>> torch.floor_divide(a, b)
-        tensor([2.0, 1.0])
-        >>> torch.floor_divide(a, 1.4)
-        tensor([2.0, 2.0])
-  }];
-}
-
-def ATen_FloorDivideInplaceOp: ATen_RefTensorOp<"floor_divide.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_MulOp: ATen_ImmutableTensorOp<"mul", [NoSideEffect]> {
-  let summary = "mul(input, other, out=None)";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Multiplies each element of the input :attr:`input` with the scalar
-    :attr:`other` and returns a new resulting tensor.
-
-    .. math::
-        \text{out}_i = \text{other} \times \text{input}_i
-
-    If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`other`
-    should be a real number, otherwise it should be an integer
-
-    Args:
-        {input}
-        value (Number): the number to be multiplied to each element of :attr:`input`
-        {out}
-
-    Example::
-
-        >>> a = torch.randn(3)
-        >>> a
-        tensor([ 0.2015, -0.4255, 2.6087])
-        >>> torch.mul(a, 100)
-        tensor([ 20.1494, -42.5491, 260.8663])
-
-    .. function:: mul(input, other, out=None)
-
-    Each element of the tensor :attr:`input` is multiplied by the corresponding
-    element of the Tensor :attr:`other`. The resulting tensor is returned.
-
-    The shapes of :attr:`input` and :attr:`other` must be
-    :ref:`broadcastable `.
-
-    .. math::
-        \text{out}_i = \text{input}_i \times \text{other}_i
-
-    Args:
-        input (Tensor): the first multiplicand tensor
-        other (Tensor): the second multiplicand tensor
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4, 1)
-        >>> a
-        tensor([[ 1.1207],
-                [-0.3137],
-                [ 0.0700],
-                [ 0.8378]])
-        >>> b = torch.randn(1, 4)
-        >>> b
-        tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
-        >>> torch.mul(a, b)
-        tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
-                [-0.1614, -0.0382, 0.1645, -0.7021],
-                [ 0.0360, 0.0085, -0.0367, 0.1567],
-                [ 0.4312, 0.1019, -0.4394, 1.8753]])
-  }];
-}
-
-def ATen_MulInplaceOp: ATen_RefTensorOp<"mul.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_RemainderOp: ATen_ImmutableTensorOp<"remainder", [NoSideEffect]> {
-  let summary = "remainder(input, other, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the element-wise remainder of division.
-
-    The dividend and divisor may contain both for integer and floating point
-    numbers. The remainder has the same sign as the divisor :attr:`other`.
-
-    When :attr:`other` is a tensor, the shapes of :attr:`input` and
-    :attr:`other` must be :ref:`broadcastable `.
-
-    Args:
-        input (Tensor): the dividend
-        other (Tensor or float): the divisor that may be either a number or a
-            Tensor of the same shape as the dividend
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
-        tensor([ 1., 0., 1., 1., 0., 1.])
-        >>> torch.remainder(torch.tensor([1., 2, 3, 4, 5]), 1.5)
-        tensor([ 1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
-
-    .. seealso::
-
-        :func:`torch.fmod`, which computes the element-wise remainder of
-        division equivalently to the C library function ``fmod()``.
-  }];
-}
-
-def ATen_RemainderInplaceOp: ATen_RefTensorOp<"remainder.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_TrueDivideOp: ATen_ImmutableTensorOp<"true_divide", [NoSideEffect]> {
-  let summary = "true_divide(dividend, divisor, *, out) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$dividend,
-    ATen_AnyTensor:$divisor
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Performs "true division" that always computes the division
-    in floating point. Analogous to division in Python 3 and equivalent to
-    :func:`torch.div` except when both inputs have bool or integer scalar types,
-    in which case they are cast to the default (floating) scalar type before the division.
-
-    .. math::
-        \text{out}_i = \frac{\text{dividend}_i}{\text{divisor}}
-
-    Args:
-        dividend (Tensor): the dividend
-        divisor (Tensor or Scalar): the divisor
-
-    Keyword args:
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> dividend = torch.tensor([5, 3], dtype=torch.int)
-        >>> divisor = torch.tensor([3, 2], dtype=torch.int)
-        >>> torch.true_divide(dividend, divisor)
-        tensor([1.6667, 1.5000])
-        >>> torch.true_divide(dividend, 2)
-        tensor([2.5000, 1.5000])
-  }];
-}
-
-def ATen_TrueDivideInplaceOp: ATen_RefTensorOp<"true_divide.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$dividend,
-    ATen_AnyTensor:$divisor,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_CumsumOp: ATen_ImmutableTensorOp<"cumsum", [NoSideEffect]> {
-  let summary = "cumsum(input, dim, out=None, dtype=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyScalar:$dim
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns the cumulative sum of elements of :attr:`input` in the dimension
-    :attr:`dim`.
-
-    For example, if :attr:`input` is a vector of size N, the result will also be
-    a vector of size N, with elements.
-
-    .. math::
-        y_i = x_1 + x_2 + x_3 + \dots + x_i
-
-    Args:
-        input (Tensor): the input tensor.
-        dim (int): the dimension to do the operation over
-        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
-            If specified, the input tensor is casted to :attr:`dtype` before the operation
-            is performed. This is useful for preventing data type overflows. Default: None.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(10)
-        >>> a
-        tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
-                0.1850, -1.1571, -0.4243])
-        >>> torch.cumsum(a, dim=0)
-        tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
-                -1.8209, -2.9780, -3.4022])
-  }];
-}
-
-def ATen_CumsumInplaceOp: ATen_RefTensorOp<"cumsum.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyScalar:$dim,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_MeanOp: ATen_ImmutableTensorOp<"mean", [NoSideEffect]> {
-  let summary = "mean(input) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_IntList:$dim,
-    ATen_BoolScalar:$keep_dim
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns the mean value of all elements in the :attr:`input` tensor.
-
-    Args:
-        input (Tensor): the input tensor.
-
-    Example::
-
-        >>> a = torch.randn(1, 3)
-        >>> a
-        tensor([[ 0.2294, -0.5481, 1.3288]])
-        >>> torch.mean(a)
-        tensor(0.3367)
-
-    .. function:: mean(input, dim, keepdim=False, out=None) -> Tensor
-
-    Returns the mean value of each row of the :attr:`input` tensor in the given
-    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
-    reduce over all of them.
-
-
-    If :attr:`keepdim` is ``True``, the output tensor is of the same size
-    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
-    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
-    output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
-    Args:
-        input (Tensor): the input tensor.
-        dim (int or tuple of ints): the dimension or dimensions to reduce.
-        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> a = torch.randn(4, 4)
-        >>> a
-        tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
-                [-0.9644, 1.0131, -0.6549, -1.4279],
-                [-0.2951, -1.3350, -0.7694, 0.5600],
-                [ 1.0842, -0.9580, 0.3623, 0.2343]])
-        >>> torch.mean(a, 1)
-        tensor([-0.0163, -0.5085, -0.4599, 0.1807])
-        >>> torch.mean(a, 1, True)
-        tensor([[-0.0163],
-                [-0.5085],
-                [-0.4599],
-                [ 0.1807]])
-  }];
-}
-
-def ATen_MeanInplaceOp: ATen_RefTensorOp<"mean.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_IntList:$dim,
-    ATen_BoolScalar:$keep_dim,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_SumOp: ATen_ImmutableTensorOp<"sum", [NoSideEffect]> {
-  let summary = "sum(input, dtype=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_IntList:$dim,
-    ATen_BoolScalar:$keep_dim
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns the sum of all elements in the :attr:`input` tensor.
-
-    Args:
-        input (Tensor): the input tensor.
-        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
-            If specified, the input tensor is casted to :attr:`dtype` before the operation
-            is performed. This is useful for preventing data type overflows. Default: None.
-
-    Example::
-
-        >>> a = torch.randn(1, 3)
-        >>> a
-        tensor([[ 0.1133, -0.9567, 0.2958]])
-        >>> torch.sum(a)
-        tensor(-0.5475)
-
-    .. function:: sum(input, dim, keepdim=False, dtype=None) -> Tensor
-
-    Returns the sum of each row of the :attr:`input` tensor in the given
-    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
-    reduce over all of them.
-
-
-    If :attr:`keepdim` is ``True``, the output tensor is of the same size
-    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
-    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
-    output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
-    Args:
-        input (Tensor): the input tensor.
-        dim (int or tuple of ints): the dimension or dimensions to reduce.
-        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
-            If specified, the input tensor is casted to :attr:`dtype` before the operation
-            is performed. This is useful for preventing data type overflows. Default: None.
-
-    Example::
-
-        >>> a = torch.randn(4, 4)
-        >>> a
-        tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
-                [-0.2993, 0.9138, 0.9337, -1.6864],
-                [ 0.1132, 0.7892, -0.1003, 0.5688],
-                [ 0.3637, -0.9906, -0.4752, -1.5197]])
-        >>> torch.sum(a, 1)
-        tensor([-0.4598, -0.1381, 1.3708, -2.6217])
-        >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
-        >>> torch.sum(b, (2, 1))
-        tensor([ 435., 1335., 2235., 3135.])
-  }];
-}
-
-def ATen_SumInplaceOp: ATen_RefTensorOp<"sum.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_IntList:$dim,
-    ATen_BoolScalar:$keep_dim,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_GatherOp: ATen_ImmutableTensorOp<"gather", [NoSideEffect]> {
-  let summary = "gather(input, dim, index, out=None, sparse_grad=False) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_IntScalar:$dim,
-    ATen_AnyTensor:$index,
-    ATen_BoolScalar:$sparse_grad
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Gathers values along an axis specified by `dim`.
-
-    For a 3-D tensor the output is specified by::
-
-        out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
-        out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
-        out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
-
-    If :attr:`input` is an n-dimensional tensor with size
-    :math:`(x_0, x_1..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`
-    and ``dim = i``, then :attr:`index` must be an :math:`n`-dimensional tensor with
-    size :math:`(x_0, x_1, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})` where :math:`y \geq 1`
-    and :attr:`out` will have the same size as :attr:`index`.
-
-    Args:
-        input (Tensor): the source tensor
-        dim (int): the axis along which to index
-        index (LongTensor): the indices of elements to gather
-        out (Tensor, optional): the destination tensor
-        sparse_grad(bool,optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
-
-    Example::
-
-        >>> t = torch.tensor([[1,2],[3,4]])
-        >>> torch.gather(t, 1, torch.tensor([[0,0],[1,0]]))
-        tensor([[ 1, 1],
-                [ 4, 3]])
-  }];
-}
-
-def ATen_GatherInplaceOp: ATen_RefTensorOp<"gather.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_IntScalar:$dim,
-    ATen_AnyTensor:$index,
-    ATen_BoolScalar:$sparse_grad,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_NumpyTOp: ATen_ImmutableTensorOp<"numpy_T", [NoSideEffect]> {
-  let summary = "Is this Tensor with its dimensions reversed.";
-  let arguments = (ins
-    ATen_AnyTensor:$input
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    If ``n`` is the number of dimensions in ``x``,
-    ``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.
-  }];
-}
-
-def ATen_AddmmOp: ATen_ImmutableTensorOp<"addmm", [NoSideEffect]> {
-  let summary = "addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$mat1,
-    ATen_AnyTensor:$mat2,
-    ATen_AnyScalar:$beta,
-    ATen_AnyScalar:$alpha
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
-    The matrix :attr:`input` is added to the final result.
-
-    If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
-    :math:`(m \times p)` tensor, then :attr:`input` must be
-    :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
-    and :attr:`out` will be a :math:`(n \times p)` tensor.
-
-    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
-    :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
-
-    .. math::
-        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
-
-    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
-    :attr:`alpha` must be real numbers, otherwise they should be integers.
-
-    Args:
-        input (Tensor): matrix to be added
-        mat1 (Tensor): the first matrix to be multiplied
-        mat2 (Tensor): the second matrix to be multiplied
-        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
-        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> M = torch.randn(2, 3)
-        >>> mat1 = torch.randn(2, 3)
-        >>> mat2 = torch.randn(3, 3)
-        >>> torch.addmm(M, mat1, mat2)
-        tensor([[-4.8716,  1.4671, -1.3746],
-                [ 0.7573, -3.9555, -2.8681]])
-  }];
-}
-
-def ATen_AddmmInplaceOp: ATen_RefTensorOp<"addmm.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$mat1,
-    ATen_AnyTensor:$mat2,
-    ATen_AnyScalar:$beta,
-    ATen_AnyScalar:$alpha,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_DotOp: ATen_ImmutableTensorOp<"dot", [NoSideEffect]> {
-  let summary = "dot(input, tensor) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$tensor
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Computes the dot product (inner product) of two tensors.
-
-    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
-
-    Example::
-
-        >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
-        tensor(7)
-  }];
-}
-
-def ATen_MatmulOp: ATen_ImmutableTensorOp<"matmul", [NoSideEffect]> {
-  let summary = "matmul(input, other, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Matrix product of two tensors.
-
-    The behavior depends on the dimensionality of the tensors as follows:
-
-    - If both tensors are 1-dimensional, the dot product (scalar) is returned.
-    - If both arguments are 2-dimensional, the matrix-matrix product is returned.
-    - If the first argument is 1-dimensional and the second argument is 2-dimensional,
-      a 1 is prepended to its dimension for the purpose of the matrix multiply.
-      After the matrix multiply, the prepended dimension is removed.
-    - If the first argument is 2-dimensional and the second argument is 1-dimensional,
-      the matrix-vector product is returned.
-    - If both arguments are at least 1-dimensional and at least one argument is
-      N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
-      argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
-      batched matrix multiply and removed after. If the second argument is 1-dimensional, a
-      1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
-      The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
-      must be broadcastable). For example, if :attr:`input` is a
-      :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
-      tensor, :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
-
-    .. note::
-
-        The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
-
-    Arguments:
-        input (Tensor): the first tensor to be multiplied
-        other (Tensor): the second tensor to be multiplied
-        out (Tensor, optional): the output tensor.
-
-    Example::
-
-        >>> # vector x vector
-        >>> tensor1 = torch.randn(3)
-        >>> tensor2 = torch.randn(3)
-        >>> torch.matmul(tensor1, tensor2).size()
-        torch.Size([])
-        >>> # matrix x vector
-        >>> tensor1 = torch.randn(3, 4)
-        >>> tensor2 = torch.randn(4)
-        >>> torch.matmul(tensor1, tensor2).size()
-        torch.Size([3])
-        >>> # batched matrix x broadcasted vector
-        >>> tensor1 = torch.randn(10, 3, 4)
-        >>> tensor2 = torch.randn(4)
-        >>> torch.matmul(tensor1, tensor2).size()
-        torch.Size([10, 3])
-        >>> # batched matrix x batched matrix
-        >>> tensor1 = torch.randn(10, 3, 4)
-        >>> tensor2 = torch.randn(10, 4, 5)
-        >>> torch.matmul(tensor1, tensor2).size()
-        torch.Size([10, 3, 5])
-        >>> # batched matrix x broadcasted matrix
-        >>> tensor1 = torch.randn(10, 3, 4)
-        >>> tensor2 = torch.randn(4, 5)
-        >>> torch.matmul(tensor1, tensor2).size()
-        torch.Size([10, 3, 5])
-  }];
-}
-
-def ATen_MatmulInplaceOp: ATen_RefTensorOp<"matmul.inplace", []> {
-  let summary = "See non-inplace op variant.";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$other,
-    ATen_AnyRefTensor:$out
-  );
-  let results = (outs);
-}
-
-def ATen_MmOp: ATen_ImmutableTensorOp<"mm", [NoSideEffect]> {
-  let summary = "mm(input, mat2, out=None) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyTensor:$mat2
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
-
-    If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
-    :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
-
-    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
-              For broadcasting matrix products, see :func:`torch.matmul`.
-
-    Args:
-        input (Tensor): the first matrix to be multiplied
-        mat2 (Tensor): the second matrix to be multiplied
-        out (Tensor, optional): the output tensor.
- - Example:: - - >>> mat1 = torch.randn(2, 3) - >>> mat2 = torch.randn(3, 3) - >>> torch.mm(mat1, mat2) - tensor([[ 0.4851, 0.5037, -0.3633], - [-0.0760, -3.6705, 2.4784]]) - }]; -} - -def ATen_MmInplaceOp: ATen_RefTensorOp<"mm.inplace", []> { - let summary = "See non-inplace op variant."; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_AnyTensor:$mat2, - ATen_AnyRefTensor:$out - ); - let results = (outs); -} - -def ATen_HardtanhOp: ATen_ImmutableTensorOp<"hardtanh", [NoSideEffect]> { - let summary = " hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor"; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_FloatScalar:$min_val, - ATen_FloatScalar:$max_val - ); - let results = (outs - ATen_AnyTensor:$result - ); - let description = [{ - Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more - details. - - }]; -} - -def ATen_AvgPool1dOp: ATen_ImmutableTensorOp<"avg_pool1d", [NoSideEffect]> { - let summary = "avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor"; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_IntList:$kernel_size, - ATen_IntList:$stride, - ATen_IntList:$padding, - ATen_BoolScalar:$ceil_mode, - ATen_BoolScalar:$count_include_pad - ); - let results = (outs - ATen_AnyTensor:$result - ); - let description = [{ - Applies a 1D average pooling over an input signal composed of several - input planes. - - See :class:`~torch.nn.AvgPool1d` for details and output shape. - - Args: - input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` - kernel_size: the size of the window. Can be a single number or a - tuple `(kW,)` - stride: the stride of the window. Can be a single number or a tuple - `(sW,)`. Default: :attr:`kernel_size` - padding: implicit zero paddings on both sides of the input. Can be a - single number or a tuple `(padW,)`. Default: 0 - ceil_mode: when True, will use `ceil` instead of `floor` to compute the - output shape. Default: ``False`` - count_include_pad: when True, will include the zero-padding in the - averaging calculation. Default: ``True`` - - Examples:: - - >>> # pool of square window of size=3, stride=2 - >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32) - >>> F.avg_pool1d(input, kernel_size=3, stride=2) - tensor([[[ 2., 4., 6.]]]) - }]; -} - -def ATen_MaxPool1dOp: ATen_ImmutableTensorOp<"max_pool1d", [NoSideEffect]> { - let summary = "Applies a 1D max pooling over an input signal composed of several input"; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_IntList:$kernel_size, - ATen_IntList:$stride, - ATen_IntList:$padding, - ATen_IntList:$dilation, - ATen_BoolScalar:$ceil_mode - ); - let results = (outs - ATen_AnyTensor:$result - ); - let description = [{ - - See :class:`~torch.nn.MaxPool1d` for details. - - }]; -} - -def ATen_AsStridedOp: ATen_ImmutableTensorOp<"as_strided", [NoSideEffect]> { - let summary = "as_strided(input, size, stride, storage_offset=0) -> Tensor"; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_IntList:$size, - ATen_IntList:$stride, - ATen_IntScalar:$storage_offset - ); - let results = (outs - ATen_AnyTensor:$result - ); - let description = [{ - Create a view of an existing `torch.Tensor` :attr:`input` with specified - :attr:`size`, :attr:`stride` and :attr:`storage_offset`. - - .. warning:: - More than one element of a created tensor may refer to a single memory - location. 
As a result, in-place operations (especially ones that are
-        vectorized) may result in incorrect behavior. If you need to write to
-        the tensors, please clone them first.
-
-    Many PyTorch functions that return a view of a tensor are internally
-    implemented with this function. Those functions, like
-    :meth:`torch.Tensor.expand`, are easier to read and are therefore more
-    advisable to use.
-
-
-    Args:
-        input (Tensor): the input tensor.
-        size (tuple or ints): the shape of the output tensor
-        stride (tuple or ints): the stride of the output tensor
-        storage_offset (int, optional): the offset in the underlying storage of the output tensor
-
-    Example::
-
-        >>> x = torch.randn(3, 3)
-        >>> x
-        tensor([[ 0.9039,  0.6291,  1.0795],
-                [ 0.1586,  2.1939, -0.4900],
-                [-0.1909, -0.7503,  1.9355]])
-        >>> t = torch.as_strided(x, (2, 2), (1, 2))
-        >>> t
-        tensor([[0.9039, 1.0795],
-                [0.6291, 0.1586]])
-        >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
-        tensor([[0.6291, 0.1586],
-                [1.0795, 2.1939]])
-
-    MLIR Specific Notes
-    -------------------
-    In PyTorch proper, this op creates a view that may internally alias, and
-    the PyTorch documentation carries explicit warnings about avoiding
-    in-place updates on such a view (without first cloning it). For the
-    moment, this op is formulated with value semantics that imply a copy
-    instead of a view, and it is expected that any sharing can be recovered
-    later by the compiler. In-place updates of such a result (the case the
-    warning above describes) should be treated as UB when compiled.
-  }];
-}
-
-def ATen_ExpandOp: ATen_ImmutableTensorOp<"expand", [NoSideEffect]> {
-  let summary = "expand(*sizes) -> Tensor";
-  let arguments = (ins
-    ATen_AnyTensor:$input,
-    ATen_AnyScalar:$sizes,
-    ATen_BoolScalar:$implicit
-  );
-  let results = (outs
-    ATen_AnyTensor:$result
-  );
-  let description = [{
-    Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
-    to a larger size.
-
-    Passing -1 as the size for a dimension means not changing the size of
-    that dimension.
-
-    The tensor can also be expanded to a larger number of dimensions, and the
-    new ones will be appended at the front. For the new dimensions, the
-    size cannot be set to -1.
-
-    Expanding a tensor does not allocate new memory, but only creates a
-    new view on the existing tensor where a dimension of size one is
-    expanded to a larger size by setting the ``stride`` to 0. Any dimension
-    of size 1 can be expanded to an arbitrary value without allocating new
-    memory.
-
-    Args:
-        *sizes (torch.Size or int...): the desired expanded size
-
-    .. warning::
-
-        More than one element of an expanded tensor may refer to a single
-        memory location. As a result, in-place operations (especially ones that
-        are vectorized) may result in incorrect behavior. If you need to write
-        to the tensors, please clone them first.
-
-    Example::
-
-        >>> x = torch.tensor([[1], [2], [3]])
-        >>> x.size()
-        torch.Size([3, 1])
-        >>> x.expand(3, 4)
-        tensor([[ 1,  1,  1,  1],
-                [ 2,  2,  2,  2],
-                [ 3,  3,  3,  3]])
-        >>> x.expand(-1, 4)   # -1 means not changing the size of that dimension
-        tensor([[ 1,  1,  1,  1],
-                [ 2,  2,  2,  2],
-                [ 3,  3,  3,  3]])
-
-    MLIR Specific Notes
-    -------------------
-    See notes for the 'as_strided' op.
- }]; -} - -def ATen_SqueezeOp: ATen_ImmutableTensorOp<"squeeze", [NoSideEffect]> { - let summary = "squeeze(input, dim=None, out=None) -> Tensor"; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_IntScalar:$dim - ); - let results = (outs - ATen_AnyTensor:$result - ); - let description = [{ - Returns a tensor with all the dimensions of :attr:`input` of size `1` removed. - - For example, if `input` is of shape: - :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor - will be of shape: :math:`(A \times B \times C \times D)`. - - When :attr:`dim` is given, a squeeze operation is done only in the given - dimension. If `input` is of shape: :math:`(A \times 1 \times B)`, - ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` - will squeeze the tensor to the shape :math:`(A \times B)`. - - .. note:: The returned tensor shares the storage with the input tensor, - so changing the contents of one will change the contents of the other. - - .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` - will also remove the batch dimension, which can lead to unexpected - errors. - - Args: - input (Tensor): the input tensor. - dim (int, optional): if given, the input will be squeezed only in - this dimension - out (Tensor, optional): the output tensor. - - Example:: - - >>> x = torch.zeros(2, 1, 2, 1, 2) - >>> x.size() - torch.Size([2, 1, 2, 1, 2]) - >>> y = torch.squeeze(x) - >>> y.size() - torch.Size([2, 2, 2]) - >>> y = torch.squeeze(x, 0) - >>> y.size() - torch.Size([2, 1, 2, 1, 2]) - >>> y = torch.squeeze(x, 1) - >>> y.size() - torch.Size([2, 2, 1, 2]) - - MLIR Specific Notes - ------------------- - See notes for the 'as_strided' op. - }]; -} - -def ATen_ViewOp: ATen_ImmutableTensorOp<"view", [NoSideEffect]> { - let summary = "view(*shape) -> Tensor"; - let arguments = (ins - ATen_AnyTensor:$input, - ATen_IntList:$size - ); - let results = (outs - ATen_AnyTensor:$result - ); - let description = [{ - Returns a new tensor with the same data as the :attr:`self` tensor but of a - different :attr:`shape`. - - The returned tensor shares the same data and must have the same number - of elements, but may have a different size. For a tensor to be viewed, the new - view size must be compatible with its original size and stride, i.e., each new - view dimension must either be a subspace of an original dimension, or only span - across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following - contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`, - - .. math:: - - \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1] - - Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape` - without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a - :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which - returns a view if the shapes are compatible, and copies (equivalent to calling - :meth:`contiguous`) otherwise. 
-
-    Args:
-        shape (torch.Size or int...): the desired size
-
-    Example::
-
-        >>> x = torch.randn(4, 4)
-        >>> x.size()
-        torch.Size([4, 4])
-        >>> y = x.view(16)
-        >>> y.size()
-        torch.Size([16])
-        >>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
-        >>> z.size()
-        torch.Size([2, 8])
-
-        >>> a = torch.randn(1, 2, 3, 4)
-        >>> a.size()
-        torch.Size([1, 2, 3, 4])
-        >>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
-        >>> b.size()
-        torch.Size([1, 3, 2, 4])
-        >>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
-        >>> c.size()
-        torch.Size([1, 3, 2, 4])
-        >>> torch.equal(b, c)
-        False
-
-
-    MLIR Specific Notes
-    -------------------
-    See notes for the 'as_strided' op.
-  }];
-}
-
diff --git a/include/npcomp/Dialect/ATen/ATenDialect.h b/include/npcomp/Dialect/ATen/IR/ATenDialect.h
similarity index 87%
rename from include/npcomp/Dialect/ATen/ATenDialect.h
rename to include/npcomp/Dialect/ATen/IR/ATenDialect.h
index 265aba8bf..461523f05 100644
--- a/include/npcomp/Dialect/ATen/ATenDialect.h
+++ b/include/npcomp/Dialect/ATen/IR/ATenDialect.h
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef NPCOMP_DIALECT_ATEN_DIALECT_H
-#define NPCOMP_DIALECT_ATEN_DIALECT_H
+#ifndef NPCOMP_DIALECT_ATEN_IR_DIALECT_H
+#define NPCOMP_DIALECT_ATEN_IR_DIALECT_H
 
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/Dialect.h"
@@ -54,7 +54,7 @@ namespace {
 // Return the tensor volume (i.e., the number of elements) of the given shaped
 // type. If the type does not have a rank, return 1. If the type doesn't
 // have a static shape, return 0.
-uint64_t getTensorVolume(const ShapedType ty) {
+inline uint64_t getTensorVolume(const ShapedType ty) {
 
   if (!ty.hasRank())
     return 1;
@@ -71,7 +71,7 @@ uint64_t getTensorVolume(const ShapedType ty) {
 // If the type doesn't have a shape, return 1. If the type is shaped, but
 // does not have a rank, return 1. If the type is shaped, but doesn't have a
 // static shape, return 0.
-uint64_t getTensorVolume(const Type ty) {
+inline uint64_t getTensorVolume(const Type ty) {
   if (auto t = ty.dyn_cast<ShapedType>()) {
     return getTensorVolume(t);
   } else {
@@ -84,12 +84,12 @@ uint64_t getTensorVolume(const Type ty) {
 } // namespace NPCOMP
 } // namespace mlir
 
-#include "npcomp/Dialect/ATen/ATenOpInterfaces.h"
+#include "npcomp/Dialect/ATen/IR/ATenOpInterfaces.h"
 
 // include TableGen generated Op definitions
 #define GET_OP_CLASSES
-#include "npcomp/Dialect/ATen/ATen.h.inc"
+#include "npcomp/Dialect/ATen/IR/ATenOps.h.inc"
 
-#include "npcomp/Dialect/ATen/ATenDialect.h.inc"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h.inc"
 
-#endif
+#endif // NPCOMP_DIALECT_ATEN_IR_DIALECT_H
diff --git a/include/npcomp/Dialect/ATen/IR/ATenDialect.td b/include/npcomp/Dialect/ATen/IR/ATenDialect.td
new file mode 100644
index 000000000..ac79e55e0
--- /dev/null
+++ b/include/npcomp/Dialect/ATen/IR/ATenDialect.td
@@ -0,0 +1,40 @@
+//===- ATenDialect.td --------------------------------------*- tablegen -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NPCOMP_DIALECT_ATEN_IR_ATEN_DIALECT
+#define NPCOMP_DIALECT_ATEN_IR_ATEN_DIALECT
+
+include "mlir/IR/OpBase.td"
+
+//===----------------------------------------------------------------------===//
+// Dialect definition
+//===----------------------------------------------------------------------===//
+
+/// The ATenDialect models 'A Tensor library' from Pytorch. The intention
+/// is to provide an abstraction which is isomorphic with datastructures
+/// returned from the pytorch jit, enabling integration with Pytorch models.
+/// Most of the actual operation definitions in tablegen are themselves
+/// generated from C APIs exported by Pytorch.
+def ATen_Dialect : Dialect {
+  let name = "aten";
+  let cppNamespace = "::mlir::NPCOMP::aten";
+}
+
+//===----------------------------------------------------------------------===//
+// Dialect types
+//===----------------------------------------------------------------------===//
+
+def ATen_ListType : DialectType<ATen_Dialect,
+    CPred<"$_self.isa<::mlir::NPCOMP::aten::ATenListType>()">, "ATen List">,
+    BuildableType<"$_builder.getType<::mlir::NPCOMP::aten::ATenListType>()"> {
+  let typeDescription = [{
+    A variadic list of arguments in ATen.
+  }];
+}
+
+#endif // NPCOMP_DIALECT_ATEN_IR_ATEN_DIALECT
diff --git a/include/npcomp/Dialect/ATen/ATenOpInterface.td b/include/npcomp/Dialect/ATen/IR/ATenOpInterface.td
similarity index 94%
rename from include/npcomp/Dialect/ATen/ATenOpInterface.td
rename to include/npcomp/Dialect/ATen/IR/ATenOpInterface.td
index dea6bf182..76092474a 100644
--- a/include/npcomp/Dialect/ATen/ATenOpInterface.td
+++ b/include/npcomp/Dialect/ATen/IR/ATenOpInterface.td
@@ -8,8 +8,8 @@
 
 include "mlir/IR/OpBase.td"
 
-#ifndef ATEN_OP_INTERFACES
-#define ATEN_OP_INTERFACES
+#ifndef NPCOMP_DIALECT_ATEN_IR_ATEN_OP_INTERFACES
+#define NPCOMP_DIALECT_ATEN_IR_ATEN_OP_INTERFACES
 
 def StatisticsOpInterface : OpInterface<"StatisticsOpInterface"> {
   let description = [{
@@ -67,4 +67,4 @@ def AnyScalar : TypeConstraint;
 
-#endif
+#endif // NPCOMP_DIALECT_ATEN_IR_ATEN_OP_INTERFACES
diff --git a/include/npcomp/Dialect/ATen/ATenOpInterfaces.h b/include/npcomp/Dialect/ATen/IR/ATenOpInterfaces.h
similarity index 70%
rename from include/npcomp/Dialect/ATen/ATenOpInterfaces.h
rename to include/npcomp/Dialect/ATen/IR/ATenOpInterfaces.h
index 7e2dcc3bc..fbcd3fca9 100644
--- a/include/npcomp/Dialect/ATen/ATenOpInterfaces.h
+++ b/include/npcomp/Dialect/ATen/IR/ATenOpInterfaces.h
@@ -6,15 +6,15 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef NPCOMP_DIALECT_ATEN_OPINTERFACES_H
-#define NPCOMP_DIALECT_ATEN_OPINTERFACES_H
+#ifndef NPCOMP_DIALECT_ATEN_IR_OPINTERFACES_H
+#define NPCOMP_DIALECT_ATEN_IR_OPINTERFACES_H
 
 #include "mlir/IR/Types.h"
 
 namespace mlir {
 namespace NPCOMP {
-#include "npcomp/Dialect/ATen/ATenOpInterfaces.h.inc"
+#include "npcomp/Dialect/ATen/IR/ATenOpInterfaces.h.inc"
 } // namespace NPCOMP
 } // namespace mlir
 
-#endif
+#endif // NPCOMP_DIALECT_ATEN_IR_OPINTERFACES_H
diff --git a/include/npcomp/Dialect/ATen/ATenOpStatisticsUtils.h b/include/npcomp/Dialect/ATen/IR/ATenOpStatisticsUtils.h
similarity index 95%
rename from include/npcomp/Dialect/ATen/ATenOpStatisticsUtils.h
rename to include/npcomp/Dialect/ATen/IR/ATenOpStatisticsUtils.h
index 242a88bb9..0a898a5c6 100644
--- a/include/npcomp/Dialect/ATen/ATenOpStatisticsUtils.h
+++ b/include/npcomp/Dialect/ATen/IR/ATenOpStatisticsUtils.h
@@ -6,10 +6,10 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef NPCOMP_DIALECT_ATEN_OPSTATISTICSUTILS_H
-#define NPCOMP_DIALECT_ATEN_OPSTATISTICSUTILS_H
+#ifndef NPCOMP_DIALECT_ATEN_IR_OPSTATISTICSUTILS_H
+#define NPCOMP_DIALECT_ATEN_IR_OPSTATISTICSUTILS_H
 
-#include "npcomp/Dialect/ATen/ATenDialect.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
 
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/IR/Types.h"
@@ -36,7 +36,6 @@ std::map<std::string, uint64_t> getConv2dStatistics(T *o, uint64_t groups) {
   TensorType biasTy = o->bias().getType().template cast<TensorType>();
 
   uint64_t ofm_volume = getTensorVolume(resultTy);
-  uint64_t ofm_depth = resultTy.getShape()[1];
 
   uint64_t ifm_depth = inputTy.getShape()[1];
   uint64_t kernel_height = weightTy.getShape()[2];
@@ -142,27 +141,20 @@ uint64_t getConv2dOperandTransferVolume(T *o, unsigned int idx, bool read) {
   float filter_height = weightTy.getShape()[3];
 
   float batch_sw = inputTy.getShape()[0];
-  float ifm_depth_sw = inputTy.getShape()[1];
   float ih = inputTy.getShape()[2];
-  float iw = inputTy.getShape()[3];
 
   float ofm_depth_sw = resultTy.getShape()[1];
 
   const float batch_hw = 4;
-  const float ifm_depth_hw = 32;
   const float ofm_depth_hw = 32;
 
   const float ifm_tile_height = 4;
-  const float ifm_tile_width = 4;
-
-  const float ofm_tile_height = 4;
-  const float ofm_tile_width = 4;
 
   float ifm_aperture = ifm_tile_height - ceilf(filter_height / 2.0f);
   float ifm_overlap = ceilf(filter_height / 2.0f);
 
   float bl = ceilf(batch_sw / batch_hw);
   float ol = ceilf(ofm_depth_sw / ofm_depth_hw);
-  float il = ceilf(ifm_depth_sw / ifm_depth_hw);
 
   float ifm_overhead = 1.0f;
   float weight_overhead = 1.0f;
@@ -274,4 +266,4 @@ std::map<std::string, uint64_t> getReLUOpStatistics(T op) {
 } // namespace NPCOMP
 } // namespace mlir
 
-#endif
+#endif // NPCOMP_DIALECT_ATEN_IR_OPSTATISTICSUTILS_H
diff --git a/include/npcomp/Dialect/ATen/ATen.td b/include/npcomp/Dialect/ATen/IR/ATenOps.td
similarity index 77%
rename from include/npcomp/Dialect/ATen/ATen.td
rename to include/npcomp/Dialect/ATen/IR/ATenOps.td
index 973f51dc7..6627d1f77 100644
--- a/include/npcomp/Dialect/ATen/ATen.td
+++ b/include/npcomp/Dialect/ATen/IR/ATenOps.td
@@ -6,39 +6,13 @@
 //
 //===----------------------------------------------------------------------===//
 
-include "mlir/IR/OpBase.td"
+#ifndef NPCOMP_DIALECT_ATEN_IR_ATEN_OPS
+#define NPCOMP_DIALECT_ATEN_IR_ATEN_OPS
 
-#ifndef ATEN_OPS
-#define ATEN_OPS
+include "npcomp/Dialect/ATen/IR/ATenDialect.td"
+include "npcomp/Dialect/ATen/IR/ATenOpInterface.td"
 
 include "mlir/Interfaces/SideEffectInterfaces.td"
-include "npcomp/Dialect/ATen/ATenOpInterface.td"
-
-//===----------------------------------------------------------------------===//
-// Dialect definition
-//===----------------------------------------------------------------------===//
-
-/// The ATenDialect models 'A Tensor library' from Pytorch. The intention
-/// is to provide an abstraction which is isomorphic with datastructures
-/// returned from the pytorch jit, enabling integration with Pytorch models.
-/// Most of the actual operation definitions in tablegen are themselves
-/// generated from C APIs exported by Pytorch.
-def ATen_Dialect : Dialect {
-  let name = "aten";
-  let cppNamespace = "::mlir::NPCOMP::aten";
-}
-
-//===----------------------------------------------------------------------===//
-// Dialect types
-//===----------------------------------------------------------------------===//
-
-def ATen_ListType : DialectType<ATen_Dialect,
-    CPred<"$_self.isa<::mlir::NPCOMP::aten::ATenListType>()">, "ATen List">,
-    BuildableType<"$_builder.getType<::mlir::NPCOMP::aten::ATenListType>()"> {
-  let typeDescription = [{
-    A variadic list of arguments in ATen.
-  }];
-}
 
 // TODO: convert to "let results =" style
 // TODO: Rename prefix from "aten" to "ATen" for consistency.
@@ -48,7 +22,7 @@ class aten_Op<string mnemonic, list<OpTrait> traits = [StatisticsOpInterface]> :
 
 // Most ops are automatically generated from pytorch specs.
-include "npcomp/Dialect/ATen/ATenOps.td"
+include "npcomp/Dialect/ATen/IR/GeneratedATenOps.td"
 
 def aten_BatchNormOp: aten_Op<"batch_norm", [NoSideEffect, StatisticsOpInterface]>,
@@ -179,4 +153,4 @@ def aten_TypeCastOp : aten_Op<"type_cast", [NoSideEffect]>,
   );
 }
 
-#endif
+#endif // NPCOMP_DIALECT_ATEN_IR_ATEN_OPS
diff --git a/include/npcomp/Dialect/ATen/IR/CMakeLists.txt b/include/npcomp/Dialect/ATen/IR/CMakeLists.txt
new file mode 100644
index 000000000..0711d1dbe
--- /dev/null
+++ b/include/npcomp/Dialect/ATen/IR/CMakeLists.txt
@@ -0,0 +1,19 @@
+set(dialect_namespace aten)
+set(LLVM_TARGET_DEFINITIONS ATenOps.td)
+
+mlir_tablegen(ATenOps.h.inc -gen-op-decls)
+mlir_tablegen(ATenOps.cpp.inc -gen-op-defs)
+mlir_tablegen(ATenDialect.h.inc -gen-dialect-decls -dialect=${dialect_namespace})
+mlir_tablegen(ATenEnums.h.inc -gen-enum-decls)
+mlir_tablegen(ATenEnums.cpp.inc -gen-enum-defs)
+add_public_tablegen_target(MLIRATenIncGen)
+add_dependencies(mlir-headers MLIRATenIncGen)
+
+set(LLVM_TARGET_DEFINITIONS ATenOpInterface.td)
+mlir_tablegen(ATenOpInterfaces.h.inc -gen-op-interface-decls)
+mlir_tablegen(ATenOpInterfaces.cpp.inc -gen-op-interface-defs)
+add_public_tablegen_target(MLIRATenOpInterfacesIncGen)
+add_dependencies(mlir-generic-headers MLIRATenOpInterfacesIncGen)
+
+add_mlir_doc(ATenDialect -gen-dialect-doc ATenDialect ATen/)
+add_mlir_doc(ATenOps -gen-op-doc ATenOps ATen/)
diff --git a/include/npcomp/Dialect/ATen/ATenOps.td b/include/npcomp/Dialect/ATen/IR/GeneratedATenOps.td
similarity index 99%
rename from include/npcomp/Dialect/ATen/ATenOps.td
rename to include/npcomp/Dialect/ATen/IR/GeneratedATenOps.td
index d1570a1be..f2961ed23 100644
--- a/include/npcomp/Dialect/ATen/ATenOps.td
+++ b/include/npcomp/Dialect/ATen/IR/GeneratedATenOps.td
@@ -7,8 +7,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef ATEN_OP_DEFS
-#define ATEN_OP_DEFS
+#ifndef NPCOMP_DIALECT_ATEN_IR_GENERATED_ATEN_OPS
+#define NPCOMP_DIALECT_ATEN_IR_GENERATED_ATEN_OPS
 
 def aten_AddOp: aten_Op<"add", [NoSideEffect, StatisticsOpInterface]>,
     Results<(outs AnyTensor)> {
@@ -730,4 +730,4 @@ def aten_MaxPool2dWithIndicesBackwardOp: aten_Op<"max_pool2d_with_indices_backwa
   }];
 }
 
-#endif
+#endif // NPCOMP_DIALECT_ATEN_IR_GENERATED_ATEN_OPS
diff --git a/include/npcomp/Dialect/ATen/ATenLayerNamePass.h b/include/npcomp/Dialect/ATen/Transforms/ATenLayerNamePass.h
similarity index 100%
rename from include/npcomp/Dialect/ATen/ATenLayerNamePass.h
rename to include/npcomp/Dialect/ATen/Transforms/ATenLayerNamePass.h
diff --git a/include/npcomp/Dialect/ATen/ATenLoweringPass.h b/include/npcomp/Dialect/ATen/Transforms/ATenLoweringPass.h
similarity index 100%
rename from include/npcomp/Dialect/ATen/ATenLoweringPass.h
rename to include/npcomp/Dialect/ATen/Transforms/ATenLoweringPass.h
diff
--git a/include/npcomp/Dialect/ATen/ATenOpReport.h b/include/npcomp/Dialect/ATen/Transforms/ATenOpReport.h similarity index 96% rename from include/npcomp/Dialect/ATen/ATenOpReport.h rename to include/npcomp/Dialect/ATen/Transforms/ATenOpReport.h index 47d35a4f0..0f88125d9 100644 --- a/include/npcomp/Dialect/ATen/ATenOpReport.h +++ b/include/npcomp/Dialect/ATen/Transforms/ATenOpReport.h @@ -11,6 +11,8 @@ #include +#include "mlir/Pass/Pass.h" + namespace mlir { class Pass; } // namespace mlir diff --git a/include/npcomp/Dialect/ATen/ATenPasses.h b/include/npcomp/Dialect/ATen/Transforms/ATenPasses.h similarity index 74% rename from include/npcomp/Dialect/ATen/ATenPasses.h rename to include/npcomp/Dialect/ATen/Transforms/ATenPasses.h index b09bd5aa2..6d7492754 100644 --- a/include/npcomp/Dialect/ATen/ATenPasses.h +++ b/include/npcomp/Dialect/ATen/Transforms/ATenPasses.h @@ -9,10 +9,10 @@ #ifndef NPCOMP_DIALECT_ATEN_PASSES_H #define NPCOMP_DIALECT_ATEN_PASSES_H -#include "npcomp/Dialect/ATen/ATenLayerNamePass.h" -#include "npcomp/Dialect/ATen/ATenLoweringPass.h" -#include "npcomp/Dialect/ATen/ATenOpReport.h" -#include "npcomp/Dialect/ATen/ReturnEliminationPass.h" +#include "npcomp/Dialect/ATen/Transforms/ATenLayerNamePass.h" +#include "npcomp/Dialect/ATen/Transforms/ATenLoweringPass.h" +#include "npcomp/Dialect/ATen/Transforms/ATenOpReport.h" +#include "npcomp/Dialect/ATen/Transforms/ReturnEliminationPass.h" namespace mlir { namespace NPCOMP { diff --git a/include/npcomp/Dialect/ATen/ATenToStd.h b/include/npcomp/Dialect/ATen/Transforms/ATenToStd.h similarity index 100% rename from include/npcomp/Dialect/ATen/ATenToStd.h rename to include/npcomp/Dialect/ATen/Transforms/ATenToStd.h diff --git a/include/npcomp/Dialect/ATen/ATenToStd.td b/include/npcomp/Dialect/ATen/Transforms/ATenToStd.td similarity index 87% rename from include/npcomp/Dialect/ATen/ATenToStd.td rename to include/npcomp/Dialect/ATen/Transforms/ATenToStd.td index 41148ac88..f2bb2c077 100644 --- a/include/npcomp/Dialect/ATen/ATenToStd.td +++ b/include/npcomp/Dialect/ATen/Transforms/ATenToStd.td @@ -6,19 +6,11 @@ // //===----------------------------------------------------------------------===// -#ifdef MLIR_ATEN_TO_STD_TD -#else +#ifndef MLIR_ATEN_TO_STD_TD #define MLIR_ATEN_TO_STD_TD -#ifdef STANDARD_OPS -#else include "mlir/Dialect/StandardOps/IR/Ops.td" -#endif // STANDARD_OPS - -#ifdef ATEN_OPS -#else -include "ATen.td" -#endif +include "npcomp/Dialect/ATen/IR/ATenOps.td" // The pytorch convolution operator has 9 arguments, but we only have a jit // library that supports the first six at the moment. 
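(Context for the ATenToStd.td rewrites above; this note and sketch are not part of the patch. The -gen-rewriters invocation that the new Transforms/CMakeLists.txt below sets up turns those DRR patterns into an ATenToStd.cpp.inc file, which C++ code pulls in, as the relocated ATenToStd.cpp does later in this patch. A minimal consumer looks roughly like the following; the wrapper name populateATenToStdPatterns is illustrative, and the exact signature of the tablegen-emitted populateWithGenerated helper varies across MLIR versions, so treat this as a sketch under those assumptions rather than as the npcomp implementation.)

// Sketch only: consuming mlir-tablegen -gen-rewriters output.
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/PatternMatch.h"
#include "npcomp/Dialect/ATen/IR/ATenDialect.h"

namespace {
// The DRR patterns generated from ATenToStd.td land in this .inc file.
#include "npcomp/Dialect/ATen/Transforms/ATenToStd.cpp.inc"
} // namespace

// Illustrative wrapper: registers every generated pattern into `patterns`.
// Note: MLIR of this vintage emits populateWithGenerated taking a pointer;
// newer versions take a reference instead.
void populateATenToStdPatterns(mlir::MLIRContext *context,
                               mlir::OwningRewritePatternList &patterns) {
  populateWithGenerated(context, &patterns);
}

A lowering pass then hands the filled pattern list to the dialect-conversion driver, which is what the ATenLoweringPass changes later in this patch are concerned with.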
diff --git a/include/npcomp/Dialect/ATen/Transforms/CMakeLists.txt b/include/npcomp/Dialect/ATen/Transforms/CMakeLists.txt new file mode 100644 index 000000000..8005480d7 --- /dev/null +++ b/include/npcomp/Dialect/ATen/Transforms/CMakeLists.txt @@ -0,0 +1,3 @@ +set(LLVM_TARGET_DEFINITIONS ATenToStd.td) +mlir_tablegen(ATenToStd.cpp.inc -gen-rewriters) +add_public_tablegen_target(MLIRATenToStdIncGen) diff --git a/include/npcomp/Dialect/ATen/LivenessReport.h b/include/npcomp/Dialect/ATen/Transforms/LivenessReport.h similarity index 94% rename from include/npcomp/Dialect/ATen/LivenessReport.h rename to include/npcomp/Dialect/ATen/Transforms/LivenessReport.h index dc19ebb12..ac9f9d5d7 100644 --- a/include/npcomp/Dialect/ATen/LivenessReport.h +++ b/include/npcomp/Dialect/ATen/Transforms/LivenessReport.h @@ -11,6 +11,9 @@ #include +#include "mlir/IR/Module.h" +#include "llvm/ADT/DenseMap.h" + namespace mlir { namespace NPCOMP { namespace aten { diff --git a/include/npcomp/Dialect/ATen/ReturnEliminationPass.h b/include/npcomp/Dialect/ATen/Transforms/ReturnEliminationPass.h similarity index 100% rename from include/npcomp/Dialect/ATen/ReturnEliminationPass.h rename to include/npcomp/Dialect/ATen/Transforms/ReturnEliminationPass.h diff --git a/lib/Dialect/ATen/CMakeLists.txt b/lib/Dialect/ATen/CMakeLists.txt index 7ad28db80..9f57627c3 100644 --- a/lib/Dialect/ATen/CMakeLists.txt +++ b/lib/Dialect/ATen/CMakeLists.txt @@ -1,25 +1,2 @@ -add_npcomp_dialect_library(NPCOMPATenDialect - ATenDialect.cpp - ATenDialectOpStats.cpp - ATenPasses.cpp - ATenLayerNamePass.cpp - ATenLoweringPass.cpp - ATenOpReport.cpp - ATenToStd.cpp - LivenessReport.cpp - ReturnEliminationPass.cpp - - ADDITIONAL_HEADER_DIRS - ${PROJECT_SOURCE_DIR}/dialect/include - ${PROJECT_BINARY_DIR}/dialect/include - - DEPENDS - MLIRATenIncGen - MLIRATenEnumsIncGen - MLIRATenOpInterfacesIncGen - MLIRATenToStdIncGen - - LINK_LIBS PUBLIC - MLIRPass - MLIRTransformUtils - ) +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/lib/Dialect/ATen/ATenDialect.cpp b/lib/Dialect/ATen/IR/ATenDialect.cpp similarity index 91% rename from lib/Dialect/ATen/ATenDialect.cpp rename to lib/Dialect/ATen/IR/ATenDialect.cpp index ee9a1bb0c..cd6025995 100644 --- a/lib/Dialect/ATen/ATenDialect.cpp +++ b/lib/Dialect/ATen/IR/ATenDialect.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// -#include "npcomp/Dialect/ATen/ATenDialect.h" +#include "npcomp/Dialect/ATen/IR/ATenDialect.h" #include "mlir/IR/DialectImplementation.h" using namespace mlir; @@ -56,8 +56,6 @@ mlir::Type ATenListType::getElementType() { } mlir::Type ATenDialect::parseType(DialectAsmParser &parser) const { - Location loc = parser.getEncodedSourceLoc(parser.getNameLoc()); - // All types start with an identifier that we switch on. 
   StringRef typeNameSpelling;
   if (failed(parser.parseKeyword(&typeNameSpelling)))
@@ -99,11 +97,11 @@ void ATenDialect::initialize() {
   addTypes<ATenListType>();
   addOperations<
 #define GET_OP_LIST
-#include "npcomp/Dialect/ATen/ATen.cpp.inc"
+#include "npcomp/Dialect/ATen/IR/ATenOps.cpp.inc"
       >();
 }
 
 #define GET_OP_CLASSES
-#include "npcomp/Dialect/ATen/ATen.cpp.inc"
+#include "npcomp/Dialect/ATen/IR/ATenOps.cpp.inc"
 
-#include "npcomp/Dialect/ATen/ATenOpInterfaces.cpp.inc"
+#include "npcomp/Dialect/ATen/IR/ATenOpInterfaces.cpp.inc"
diff --git a/lib/Dialect/ATen/ATenDialectOpStats.cpp b/lib/Dialect/ATen/IR/ATenDialectOpStats.cpp
similarity index 99%
rename from lib/Dialect/ATen/ATenDialectOpStats.cpp
rename to lib/Dialect/ATen/IR/ATenDialectOpStats.cpp
index d00554fb4..961f611cb 100644
--- a/lib/Dialect/ATen/ATenDialectOpStats.cpp
+++ b/lib/Dialect/ATen/IR/ATenDialectOpStats.cpp
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenDialect.h"
-#include "npcomp/Dialect/ATen/ATenOpStatisticsUtils.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
+#include "npcomp/Dialect/ATen/IR/ATenOpStatisticsUtils.h"
 
 #include "llvm/Support/Debug.h"
 
diff --git a/lib/Dialect/ATen/IR/CMakeLists.txt b/lib/Dialect/ATen/IR/CMakeLists.txt
new file mode 100644
index 000000000..4ecf8bbc7
--- /dev/null
+++ b/lib/Dialect/ATen/IR/CMakeLists.txt
@@ -0,0 +1,17 @@
+add_npcomp_dialect_library(NPCOMPATenDialect
+  ATenDialect.cpp
+  ATenDialectOpStats.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${PROJECT_SOURCE_DIR}/include/npcomp/Dialect/ATen
+
+  DEPENDS
+  MLIRATenIncGen
+  #MLIRATenEnumsIncGen
+  MLIRATenOpInterfacesIncGen
+  #MLIRATenToStdIncGen
+
+  LINK_LIBS PUBLIC
+  MLIRPass
+  MLIRTransformUtils
+)
diff --git a/lib/Dialect/ATen/ATenLayerNamePass.cpp b/lib/Dialect/ATen/Transforms/ATenLayerNamePass.cpp
similarity index 96%
rename from lib/Dialect/ATen/ATenLayerNamePass.cpp
rename to lib/Dialect/ATen/Transforms/ATenLayerNamePass.cpp
index b841cd90d..33a1bbdb2 100644
--- a/lib/Dialect/ATen/ATenLayerNamePass.cpp
+++ b/lib/Dialect/ATen/Transforms/ATenLayerNamePass.cpp
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenLayerNamePass.h"
-#include "npcomp/Dialect/ATen/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenLayerNamePass.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
 
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
diff --git a/lib/Dialect/ATen/ATenLoweringPass.cpp b/lib/Dialect/ATen/Transforms/ATenLoweringPass.cpp
similarity index 95%
rename from lib/Dialect/ATen/ATenLoweringPass.cpp
rename to lib/Dialect/ATen/Transforms/ATenLoweringPass.cpp
index 2a3bb7f0b..4ff45115e 100644
--- a/lib/Dialect/ATen/ATenLoweringPass.cpp
+++ b/lib/Dialect/ATen/Transforms/ATenLoweringPass.cpp
@@ -6,9 +6,9 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenLoweringPass.h"
-#include "npcomp/Dialect/ATen/ATenDialect.h"
-#include "npcomp/Dialect/ATen/ATenToStd.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenLoweringPass.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenToStd.h"
 
 #include "mlir/Dialect/Affine/EDSC/Builders.h"
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
@@ -70,7 +70,7 @@ static Value typeCast(PatternRewriter &builder, Value val, Type destTy) {
 /// unknown shape.
 static MemRefType getShapeErasedMemRefType(MemRefType type) {
   std::vector<int64_t> shape = type.getShape();
-  for (int i = 0; i < shape.size(); i++) {
+  for (size_t i = 0, e = shape.size(); i < e; i++) {
     shape[i] = -1;
   }
   return MemRefType::get(shape, type.getElementType(), type.getAffineMaps(),
@@ -120,27 +120,6 @@ static std::string getFullyMangledType(const Type ty) {
   return ret.str();
 }
 
-// Mangle the argument shapes into the function name. This is impractical for
-// a library-based implementation, since each different shape has to be
-// implemented by a different function. The function name is constructed
-// from the prefix, the mangled result types, the mangled operand types.
-// Types are mangled in a way that encodes the full shape information.
-static std::string getFullyMangledFuncName(std::string prefix,
-                                           FunctionType fnTy) {
-  std::string sep = "_";
-
-  ArrayRef<Type> resultTy = fnTy.getResults();
-  ArrayRef<Type> operTy = fnTy.getInputs();
-
-  std::string ret = prefix + "_AtenAcapOp_";
-  for (const Type t : resultTy)
-    ret = ret + sep + getFullyMangledType(t);
-  for (const Type t : operTy)
-    ret = ret + sep + getFullyMangledType(t);
-
-  return ret;
-}
-
 // Mangle the argument ranks into the function name.
 // TODO: Currently only supports MemRef, Float, Integer, and AtenList (poorly)
 static std::string getSimplyMangledType(const Type ty) {
@@ -192,15 +171,6 @@ static std::string getSimplyMangledFuncName(std::string prefix,
   return ret;
 }
 
-static std::string getSimplyMangledFuncName(std::string prefix,
-                                            FunctionType fnTy) {
-
-  return getSimplyMangledFuncName(prefix, fnTy.getInputs(), fnTy.getResults());
-}
-
-std::string getMangledFuncName(std::string prefix, FunctionType fnTy) {
-  return getSimplyMangledFuncName(prefix, fnTy);
-}
-
 std::string getMangledFuncName(std::string prefix, ArrayRef<Type> opTys,
                                ArrayRef<Type> retTys) {
@@ -254,13 +224,10 @@ public:
     Value result = rewriter.create<AllocOp>(loc, memRefResultTy);
     Value lhs = memRefTypeCast(rewriter, operands[0]);
     Value rhs = memRefTypeCast(rewriter, operands[1]);
-    auto indexType = IndexType::get(op->getContext());
-
     using namespace edsc;
 
     ScopedContext scope(rewriter, loc);
     Value zero = intrinsics::std_constant_index(0);
-    Value one = intrinsics::std_constant_index(1);
     MemRefBoundsCapture vRes(result), vLHS(lhs), vRHS(rhs);
     StdIndexedValue iRes(result), iLHS(lhs), iRHS(rhs);
     Value M(vRes.ub(0));
@@ -345,8 +312,6 @@ LogicalResult rewriteWithVoidFunctionCallExplicit(
     TensorType tensorResultTy = t.cast<TensorType>();
     MemRefType memRefResultTy = mlir::MemRefType::get(
         tensorResultTy.getShape(), tensorResultTy.getElementType(), {}, 0);
-    MemRefType erasedMemRefResultTy =
-        getShapeErasedMemRefType(memRefResultTy);
     retTys.push_back(memRefResultTy);
 
     // assume memRefResultTy has known shape, so we don't need any
@@ -367,8 +332,7 @@
   FuncOp funcOp = getATenFn(op->getParentOfType<ModuleOp>(),
                             mangledFunctionName, newOps, empty);
 
-  auto new_call =
-      callOperation(empty, rewriter.getSymbolRefAttr(funcOp), newOps);
+  callOperation(empty, rewriter.getSymbolRefAttr(funcOp), newOps);
 
   rewriter.replaceOp(op, newResults);
   return success();
@@ -442,8 +406,6 @@ public:
     auto loc = op->getLoc();
     edsc::ScopedContext scope(rewriter, loc);
 
-    auto constOp = cast<ConstantOp>(op);
-
     Value result = op->getResult(0);
     Type t = result.getType();
     if (t.isa<TensorType>()) {
diff --git a/lib/Dialect/ATen/ATenOpReport.cpp b/lib/Dialect/ATen/Transforms/ATenOpReport.cpp
similarity index 95%
rename from lib/Dialect/ATen/ATenOpReport.cpp
rename to lib/Dialect/ATen/Transforms/ATenOpReport.cpp
index 79af5b339..a480fa325 100644
--- a/lib/Dialect/ATen/ATenOpReport.cpp
+++ b/lib/Dialect/ATen/Transforms/ATenOpReport.cpp
@@ -6,8 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenOpReport.h"
-#include "npcomp/Dialect/ATen/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenOpReport.h"
 
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -15,6 +14,7 @@
 #include "llvm/Support/raw_ostream.h"
 
 #include "mlir/Pass/Pass.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
 
 #include 
 #include 
@@ -25,10 +25,6 @@ using namespace mlir;
 
 namespace {
 
-std::string getAsString(std::map<std::string, uint64_t> &m, std::string &e) {
-  return m.count(e) ? std::to_string(m[e]) : " ";
-}
-
 /// Query operations through the StatisticsOpInterface and print the result
 /// in a human-readable way. This replicates the functionality in various
 /// network analysis tools and is a stepping stone toward using the information
diff --git a/lib/Dialect/ATen/ATenPasses.cpp b/lib/Dialect/ATen/Transforms/ATenPasses.cpp
similarity index 93%
rename from lib/Dialect/ATen/ATenPasses.cpp
rename to lib/Dialect/ATen/Transforms/ATenPasses.cpp
index 3c50e810f..215b81c76 100644
--- a/lib/Dialect/ATen/ATenPasses.cpp
+++ b/lib/Dialect/ATen/Transforms/ATenPasses.cpp
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenPasses.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenPasses.h"
 
 using namespace mlir::NPCOMP::aten;
 
diff --git a/lib/Dialect/ATen/ATenToStd.cpp b/lib/Dialect/ATen/Transforms/ATenToStd.cpp
similarity index 81%
rename from lib/Dialect/ATen/ATenToStd.cpp
rename to lib/Dialect/ATen/Transforms/ATenToStd.cpp
index 562302250..60531f673 100644
--- a/lib/Dialect/ATen/ATenToStd.cpp
+++ b/lib/Dialect/ATen/Transforms/ATenToStd.cpp
@@ -6,16 +6,17 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenToStd.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenToStd.h"
+
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "npcomp/Dialect/ATen/ATenDialect.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
 
 using namespace mlir;
 using namespace mlir::NPCOMP;
 
 namespace {
 // import patterns
-#include "npcomp/Dialect/ATen/ATenToStd.cpp.inc"
+#include "npcomp/Dialect/ATen/Transforms/ATenToStd.cpp.inc"
 } // namespace
 
 namespace mlir {
diff --git a/lib/Dialect/ATen/Transforms/CMakeLists.txt b/lib/Dialect/ATen/Transforms/CMakeLists.txt
new file mode 100644
index 000000000..1fcd4dc38
--- /dev/null
+++ b/lib/Dialect/ATen/Transforms/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_npcomp_conversion_library(NPCOMPATenPasses
+  ATenPasses.cpp
+  ATenLayerNamePass.cpp
+  ATenLoweringPass.cpp
+  ATenOpReport.cpp
+  ATenToStd.cpp
+  LivenessReport.cpp
+  ReturnEliminationPass.cpp
+
+  DEPENDS
+  MLIRATenToStdIncGen
+)
diff --git a/lib/Dialect/ATen/LivenessReport.cpp b/lib/Dialect/ATen/Transforms/LivenessReport.cpp
similarity index 91%
rename from lib/Dialect/ATen/LivenessReport.cpp
rename to lib/Dialect/ATen/Transforms/LivenessReport.cpp
index 6f74cc95b..b556e8d84 100644
--- a/lib/Dialect/ATen/LivenessReport.cpp
+++ b/lib/Dialect/ATen/Transforms/LivenessReport.cpp
@@ -6,7 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/LivenessReport.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
 
 #include "mlir/Analysis/Liveness.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -15,8 +16,6 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/JSON.h"
 
-#include "npcomp/Dialect/ATen/LivenessReport.h"
-
 #include 
 #include 
 #include 
@@ -25,28 +24,6 @@
 
 using namespace mlir;
 
-namespace {
-
-uint64_t getTensorVolume(const ShapedType ty) {
-
-  if (!ty.hasRank())
-    return 1;
-
-  uint64_t volume = 1;
-  for (auto &d : ty.getShape())
-    volume *= d;
-  return volume;
-}
-
-uint64_t getTensorVolume(const Type ty) {
-  if (auto t = ty.dyn_cast<ShapedType>()) {
-    return getTensorVolume(t);
-  } else {
-    return 1;
-  }
-}
-} // namespace
-
 namespace mlir {
 namespace NPCOMP {
 namespace aten {
@@ -72,9 +49,6 @@ std::string LivenessReport::generateTextReport() {
 std::string LivenessReport::emitJSONReport() {
   resolveLiveness();
   llvm::json::Object top;
-  auto context = module.getContext();
-  auto loc = mlir::UnknownLoc::get(context);
-  auto graph = module.lookupSymbol<FuncOp>("graph");
 
   std::map> liveAt;
 
@@ -117,7 +91,6 @@
       if (v.getDefiningOp()) {
         if (auto a =
                 v.getDefiningOp()->getAttrOfType<StringAttr>("layer_name")) {
-          auto definingOp = v.getDefiningOp();
           auto ld = layerDetail.getInteger(a.getValue().str());
           if (ld)
             layerDetail[a.getValue().str()] = *ld + vol;
diff --git a/lib/Dialect/ATen/ReturnEliminationPass.cpp b/lib/Dialect/ATen/Transforms/ReturnEliminationPass.cpp
similarity index 94%
rename from lib/Dialect/ATen/ReturnEliminationPass.cpp
rename to lib/Dialect/ATen/Transforms/ReturnEliminationPass.cpp
index 57e8a06f5..7744f3ed9 100644
--- a/lib/Dialect/ATen/ReturnEliminationPass.cpp
+++ b/lib/Dialect/ATen/Transforms/ReturnEliminationPass.cpp
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "npcomp/Dialect/ATen/ReturnEliminationPass.h"
-#include "npcomp/Dialect/ATen/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/ReturnEliminationPass.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
 
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -78,8 +78,8 @@ public:
       newCallArgs.push_back(valueMap[v]);
     }
 
-    auto newCallOp = builder->create<CallOp>(op->getLoc(), newFnName,
-                                             ArrayRef<Type>{}, newCallArgs);
+    builder->create<CallOp>(op->getLoc(), newFnName, ArrayRef<Type>{},
+                            newCallArgs);
 
     erasedOps.insert(op);
     auto fn = module.lookupSymbol<FuncOp>(callOp.callee());
     if (fn && fn.use_empty())
@@ -105,7 +105,6 @@ public:
 
   void runOnOperation() override {
     auto module = getOperation();
-    auto context = module.getContext();
 
     // check that a function called "graph" exists
     auto graph = module.lookupSymbol<FuncOp>("graph");
diff --git a/lib/InitAll.cpp b/lib/InitAll.cpp
index f30e7deea..6dec0e700 100644
--- a/lib/InitAll.cpp
+++ b/lib/InitAll.cpp
@@ -8,8 +8,8 @@
 
 #include "npcomp/InitAll.h"
 
-#include "npcomp/Dialect/ATen/ATenDialect.h"
-#include "npcomp/Dialect/ATen/ATenPasses.h"
+#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
+#include "npcomp/Dialect/ATen/Transforms/ATenPasses.h"
 #include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h"
 #include "npcomp/Dialect/Basicpy/Transforms/Passes.h"
 #include "npcomp/Dialect/Numpy/IR/NumpyDialect.h"
diff --git a/lib/RefBackend/TensorToMemref/LowerStructuralToMemref.cpp b/lib/RefBackend/TensorToMemref/LowerStructuralToMemref.cpp
index f5025e003..c4f27772f 100644
--- a/lib/RefBackend/TensorToMemref/LowerStructuralToMemref.cpp
+++ b/lib/RefBackend/TensorToMemref/LowerStructuralToMemref.cpp
@@ -28,8 +28,6 @@ using namespace mlir::NPCOMP;
 // conversion about them.
 //===----------------------------------------------------------------------===//
 
-
-
 namespace {
 // This is a type conversion similar to CallOpSignatureConversion.
 class LowerSelectOpTypes : public OpConversionPattern<SelectOp> {
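(The patch excerpt ends inside LowerSelectOpTypes. For orientation only: a type-conversion pattern of this shape typically recreates the op from its already-converted operands so that the result type follows the new operand types. The sketch below uses the ArrayRef<Value>-operand form of OpConversionPattern from this MLIR vintage; the class name is hypothetical and this is not the npcomp implementation.)

// Illustrative sketch of a select type-conversion pattern; not npcomp code.
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

namespace {
class ExampleSelectTypeConversion : public OpConversionPattern<SelectOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(SelectOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // `operands` are already type-converted (e.g. tensor -> memref);
    // rebuilding the op lets its result type follow the converted
    // true/false operands instead of the original tensor types.
    rewriter.replaceOpWithNewOp<SelectOp>(op, operands[0], operands[1],
                                          operands[2]);
    return success();
  }
};
} // namespace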