//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
//   https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
// This file is automatically generated. Please do not edit.
// Generated via:
//   python -m torch_mlir_utils.codegen.torch_signature_ods_gen
//===----------------------------------------------------------------------===//
// NOTE(review): In the text this file was recovered from, both
// DeclareOpInterfaceMethods traits had lost their template arguments (leaving
// an invalid, duplicated bare class reference). They are reconstructed below
// as the kernel-interface pair this generator emits; confirm against fresh
// generator output before relying on the exact interface/method names.
//===----------------------------------------------------------------------===//

// -----------------------------------------------------------------------------
// Binary arithmetic ops
// -----------------------------------------------------------------------------

def aten_AddOp: aten_Op<"add", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::add";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other,
    AnyTorchScalarType:$alpha
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_Atan2Op: aten_Op<"atan2", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::atan2";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_DivOp: aten_Op<"div", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::div";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_FloorDivideOp: aten_Op<"floor_divide", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::floor_divide";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_MulOp: aten_Op<"mul", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::mul";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_RemainderOp: aten_Op<"remainder", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::remainder";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_TrueDivideOp: aten_Op<"true_divide", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::true_divide";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_MaximumOp: aten_Op<"maximum", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::maximum";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_MinimumOp: aten_Op<"minimum", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::minimum";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$other
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

// -----------------------------------------------------------------------------
// Unary arithmetic ops
// -----------------------------------------------------------------------------

def aten_AbsOp: aten_Op<"abs", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::abs";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_AcosOp: aten_Op<"acos", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::acos";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_AngleOp: aten_Op<"angle", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::angle";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_AsinOp: aten_Op<"asin", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::asin";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_AtanOp: aten_Op<"atan", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::atan";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_CeilOp: aten_Op<"ceil", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::ceil";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ConjOp: aten_Op<"conj", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::conj";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_CosOp: aten_Op<"cos", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::cos";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_CoshOp: aten_Op<"cosh", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::cosh";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_DigammaOp: aten_Op<"digamma", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::digamma";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ErfOp: aten_Op<"erf", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::erf";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ErfcOp: aten_Op<"erfc", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::erfc";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ErfinvOp: aten_Op<"erfinv", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::erfinv";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ExpOp: aten_Op<"exp", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::exp";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_Expm1Op: aten_Op<"expm1", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::expm1";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_FloorOp: aten_Op<"floor", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::floor";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_FracOp: aten_Op<"frac", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::frac";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_LgammaOp: aten_Op<"lgamma", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::lgamma";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_LogOp: aten_Op<"log", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::log";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_Log10Op: aten_Op<"log10", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::log10";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_Log1pOp: aten_Op<"log1p", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::log1p";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_Log2Op: aten_Op<"log2", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::log2";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_NegOp: aten_Op<"neg", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::neg";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ReluOp: aten_Op<"relu", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::relu";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ReciprocalOp: aten_Op<"reciprocal", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::reciprocal";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_RoundOp: aten_Op<"round", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::round";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_RsqrtOp: aten_Op<"rsqrt", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::rsqrt";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_SigmoidOp: aten_Op<"sigmoid", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::sigmoid";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_SignOp: aten_Op<"sign", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::sign";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_SinOp: aten_Op<"sin", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::sin";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_SinhOp: aten_Op<"sinh", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::sinh";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_SqrtOp: aten_Op<"sqrt", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::sqrt";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_TanOp: aten_Op<"tan", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::tan";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_TanhOp: aten_Op<"tanh", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::tanh";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_TruncOp: aten_Op<"trunc", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::trunc";
  let arguments = (ins
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

// -----------------------------------------------------------------------------
// NN ops
// -----------------------------------------------------------------------------

def aten_ConvolutionOp: aten_Op<"convolution", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::convolution_overrideable";
  let arguments = (ins
    AnyTorchImmutableTensor:$input,
    AnyTorchImmutableTensor:$weight,
    AnyTorchOptionalImmutableTensor:$bias,
    AnyTorchIntListType:$stride,
    AnyTorchIntListType:$padding,
    AnyTorchIntListType:$dilation,
    AnyTorchBoolType:$transposed,
    AnyTorchIntListType:$output_padding,
    AnyTorchIntType:$groups
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_ConvolutionBackwardOp: aten_Op<"convolution_backward", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::convolution_backward_overrideable";
  let arguments = (ins
    AnyTorchImmutableTensor:$grad_output,
    AnyTorchImmutableTensor:$input,
    AnyTorchImmutableTensor:$weight,
    AnyTorchIntListType:$stride,
    AnyTorchIntListType:$padding,
    AnyTorchIntListType:$dilation,
    AnyTorchBoolType:$transposed,
    AnyTorchIntListType:$output_padding,
    AnyTorchIntType:$groups,
    AnyTorchBoolListType:$output_mask
  );
  let results = (outs
    AnyTorchOptionalImmutableTensor:$grad_input,
    AnyTorchOptionalImmutableTensor:$grad_weight,
    AnyTorchOptionalImmutableTensor:$grad_bias
  );
}

def aten_LogSoftmaxOp: aten_Op<"log_softmax", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::_log_softmax";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchIntType:$dim,
    AnyTorchBoolType:$half_to_float
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_LogSoftmaxBackwardDataOp: aten_Op<"log_softmax_backward_data", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::_log_softmax_backward_data";
  let arguments = (ins
    AnyTorchImmutableTensor:$grad_output,
    AnyTorchImmutableTensor:$output,
    AnyTorchIntType:$dim,
    AnyTorchImmutableTensor:$self
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_MmOp: aten_Op<"mm", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::mm";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$mat2
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

// -----------------------------------------------------------------------------
// Loss function ops
// -----------------------------------------------------------------------------

def aten_NllLossForwardOp: aten_Op<"nll_loss_forward", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::nll_loss_forward";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$target,
    AnyTorchOptionalImmutableTensor:$weight,
    AnyTorchIntType:$reduction,
    AnyTorchIntType:$ignore_index
  );
  let results = (outs
    AnyTorchImmutableTensor:$output,
    AnyTorchImmutableTensor:$total_weight
  );
}

def aten_NllLossBackwardOp: aten_Op<"nll_loss_backward", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::nll_loss_backward";
  let arguments = (ins
    AnyTorchImmutableTensor:$grad_output,
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$target,
    AnyTorchOptionalImmutableTensor:$weight,
    AnyTorchIntType:$reduction,
    AnyTorchIntType:$ignore_index,
    AnyTorchImmutableTensor:$total_weight
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

def aten_NllLoss2dForwardOp: aten_Op<"nll_loss2d_forward", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::nll_loss2d_forward";
  let arguments = (ins
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$target,
    AnyTorchOptionalImmutableTensor:$weight,
    AnyTorchIntType:$reduction,
    AnyTorchIntType:$ignore_index
  );
  let results = (outs
    AnyTorchImmutableTensor:$output,
    AnyTorchImmutableTensor:$total_weight
  );
}

def aten_NllLoss2dBackwardOp: aten_Op<"nll_loss2d_backward", [NoSideEffect,
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::nll_loss2d_backward";
  let arguments = (ins
    AnyTorchImmutableTensor:$grad_output,
    AnyTorchImmutableTensor:$self,
    AnyTorchImmutableTensor:$target,
    AnyTorchOptionalImmutableTensor:$weight,
    AnyTorchIntType:$reduction,
    AnyTorchIntType:$ignore_index,
    AnyTorchImmutableTensor:$total_weight
  );
  let results = (outs
    AnyTorchImmutableTensor
  );
}

// Mutates $self in place (kernel aten::copy_), hence no NoSideEffect trait,
// a mutable-tensor operand, and no results.
def aten_CopyInplaceOp: aten_Op<"copy.inplace", [
    DeclareOpInterfaceMethods<TorchBuildableKernelOpInterface, ["getTorchBuildKernelMetadata"]>,
    DeclareOpInterfaceMethods<TorchKernelOpInterface, ["getTorchKernelMetadata"]>]> {
  let summary = "Recognized op for kernel aten::copy_";
  let arguments = (ins
    AnyTorchMutableTensor:$self,
    AnyTorchImmutableTensor:$src
  );
  let results = (outs
  );
}