//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
//   https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
//
// This file is automatically generated. Please do not edit.
// Generated via:
//   python -m torch_mlir_utils.codegen.torch_ods_gen
//
//===----------------------------------------------------------------------===//

def Torch_AtenTanhOp : Torch_Op<"aten.tanh", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::tanh : (Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self attr-dict `:` type($self) `->` type($result)";
}

def Torch_AtenTanh_Op : Torch_Op<"aten.tanh_", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::tanh_ : (Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self attr-dict `:` type($self) `->` type($result)";
}

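// Example (illustrative, not produced by the generator): given the
// assemblyFormat above, a value-semantic use and its trailing-underscore
// in-place counterpart might print as below. The exact tensor type spelling
// (e.g. !torch.vtensor<[2,3],f32> vs. !torch.tensor) is an assumption about
// this dialect revision.
//
//   %1 = torch.aten.tanh %0 : !torch.vtensor<[2,3],f32> -> !torch.vtensor<[2,3],f32>
//   %2 = torch.aten.tanh_ %arg0 : !torch.tensor -> !torch.tensor
//
// The underscore form mutates its operand in place, so it carries
// IsTrailingUnderscoreInplaceVariant instead of HasValueSemantics.
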
def Torch_AtenReluOp : Torch_Op<"aten.relu", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::relu : (Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self attr-dict `:` type($self) `->` type($result)";
}

def Torch_AtenRelu_Op : Torch_Op<"aten.relu_", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::relu_ : (Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self attr-dict `:` type($self) `->` type($result)";
}

def Torch_AtenAddTensorOp : Torch_Op<"aten.add.Tensor", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other,
    AnyTorchScalarType:$alpha
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other `,` $alpha attr-dict `:` type($self) `,` type($other) `,` type($alpha) `->` type($result)";
}

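// Example (illustrative): `aten.add.Tensor` computes `self + alpha * other`;
// the trailing Scalar operand is `alpha`. Assuming `!torch.int` as one valid
// scalar spelling in this dialect revision:
//
//   %r = torch.aten.add.Tensor %self, %other, %alpha :
//       !torch.vtensor<[4],f32>, !torch.vtensor<[4],f32>, !torch.int -> !torch.vtensor<[4],f32>
//
// `aten.sub.Tensor` below takes the same (self, other, alpha) operands, while
// the mul/div variants take only (self, other).
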
def Torch_AtenAdd_TensorOp : Torch_Op<"aten.add_.Tensor", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::add_.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other,
    AnyTorchScalarType:$alpha
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other `,` $alpha attr-dict `:` type($self) `,` type($other) `,` type($alpha) `->` type($result)";
}

def Torch_AtenSubTensorOp : Torch_Op<"aten.sub.Tensor", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other,
    AnyTorchScalarType:$alpha
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other `,` $alpha attr-dict `:` type($self) `,` type($other) `,` type($alpha) `->` type($result)";
}

def Torch_AtenSub_TensorOp : Torch_Op<"aten.sub_.Tensor", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::sub_.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other,
    AnyTorchScalarType:$alpha
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other `,` $alpha attr-dict `:` type($self) `,` type($other) `,` type($alpha) `->` type($result)";
}

def Torch_AtenMulTensorOp : Torch_Op<"aten.mul.Tensor", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other attr-dict `:` type($self) `,` type($other) `->` type($result)";
}

def Torch_AtenMul_TensorOp : Torch_Op<"aten.mul_.Tensor", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::mul_.Tensor : (Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other attr-dict `:` type($self) `,` type($other) `->` type($result)";
}

def Torch_AtenDivTensorOp : Torch_Op<"aten.div.Tensor", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::div.Tensor : (Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other attr-dict `:` type($self) `,` type($other) `->` type($result)";
}

def Torch_AtenDiv_TensorOp : Torch_Op<"aten.div_.Tensor", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::div_.Tensor : (Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$other
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $other attr-dict `:` type($self) `,` type($other) `->` type($result)";
}

def Torch_AtenLerpTensorOp : Torch_Op<"aten.lerp.Tensor", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$end,
    AnyTorchTensorType:$weight
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $end `,` $weight attr-dict `:` type($self) `,` type($end) `,` type($weight) `->` type($result)";
}

def Torch_AtenLerp_TensorOp : Torch_Op<"aten.lerp_.Tensor", [
    IsTrailingUnderscoreInplaceVariant,
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::lerp_.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$end,
    AnyTorchTensorType:$weight
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $end `,` $weight attr-dict `:` type($self) `,` type($end) `,` type($weight) `->` type($result)";
}

def Torch_AtenLinearOp : Torch_Op<"aten.linear", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    AnyTorchTensorType:$weight,
    AnyTorchOptionalTensor:$bias
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$input `,` $weight `,` $bias attr-dict `:` type($input) `,` type($weight) `,` type($bias) `->` type($result)";
}

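// Example (illustrative): `aten.linear` takes an optional bias (`Tensor?`).
// Assuming a missing optional is represented by a `!torch.none` value (an
// assumption about how AnyTorchOptionalTensor is modeled in this revision):
//
//   %y0 = torch.aten.linear %x, %w, %b :
//       !torch.vtensor<[2,3],f32>, !torch.vtensor<[4,3],f32>, !torch.vtensor<[4],f32> -> !torch.vtensor<[2,4],f32>
//   %y1 = torch.aten.linear %x, %w, %none :
//       !torch.vtensor<[2,3],f32>, !torch.vtensor<[4,3],f32>, !torch.none -> !torch.vtensor<[2,4],f32>
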
def Torch_AtenMmOp : Torch_Op<"aten.mm", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::mm : (Tensor, Tensor) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchTensorType:$mat2
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $mat2 attr-dict `:` type($self) `,` type($mat2) `->` type($result)";
}

def Torch_AtenConv2dOp : Torch_Op<"aten.conv2d", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    AnyTorchTensorType:$weight,
    AnyTorchOptionalTensor:$bias,
    AnyTorchIntListType:$stride,
    AnyTorchIntListType:$padding,
    AnyTorchIntListType:$dilation,
    Torch_IntType:$groups
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$input `,` $weight `,` $bias `,` $stride `,` $padding `,` $dilation `,` $groups attr-dict `:` type($input) `,` type($weight) `,` type($bias) `,` type($stride) `,` type($padding) `,` type($dilation) `,` type($groups) `->` type($result)";
}

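// Example (illustrative): the `int[]` operands of `aten.conv2d` (stride,
// padding, dilation) are list values. Assuming lists are built with
// `torch.prim.ListConstruct` and that the list type prints as
// `!torch.list<int>` (both assumptions about this revision; the exact op and
// type spellings may differ):
//
//   %stride = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
//   %out = torch.aten.conv2d %input, %weight, %bias, %stride, %padding, %dilation, %groups : ...
//   (operand types elided in this sketch)
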
def Torch_AtenBatchNormOp : Torch_Op<"aten.batch_norm", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    AnyTorchOptionalTensor:$weight,
    AnyTorchOptionalTensor:$bias,
    AnyTorchOptionalTensor:$running_mean,
    AnyTorchOptionalTensor:$running_var,
    Torch_BoolType:$training,
    Torch_FloatType:$momentum,
    Torch_FloatType:$eps,
    Torch_BoolType:$cudnn_enabled
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$input `,` $weight `,` $bias `,` $running_mean `,` $running_var `,` $training `,` $momentum `,` $eps `,` $cudnn_enabled attr-dict `:` type($input) `,` type($weight) `,` type($bias) `,` type($running_mean) `,` type($running_var) `,` type($training) `,` type($momentum) `,` type($eps) `,` type($cudnn_enabled) `->` type($result)";
}

def Torch_AtenMaxPool2dOp : Torch_Op<"aten.max_pool2d", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchIntListType:$kernel_size,
    AnyTorchIntListType:$stride,
    AnyTorchIntListType:$padding,
    AnyTorchIntListType:$dilation,
    Torch_BoolType:$ceil_mode
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $kernel_size `,` $stride `,` $padding `,` $dilation `,` $ceil_mode attr-dict `:` type($self) `,` type($kernel_size) `,` type($stride) `,` type($padding) `,` type($dilation) `,` type($ceil_mode) `->` type($result)";
}

def Torch_AtenAdaptiveAvgPool2dOp : Torch_Op<"aten.adaptive_avg_pool2d", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    AnyTorchIntListType:$output_size
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $output_size attr-dict `:` type($self) `,` type($output_size) `->` type($result)";
}

def Torch_AtenUnsqueezeOp : Torch_Op<"aten.unsqueeze", [
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::unsqueeze : (Tensor, int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    Torch_IntType:$dim
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $dim attr-dict `:` type($self) `,` type($dim) `->` type($result)";
}

def Torch_AtenFlattenUsingIntsOp : Torch_Op<"aten.flatten.using_ints", [
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    Torch_IntType:$start_dim,
    Torch_IntType:$end_dim
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $start_dim `,` $end_dim attr-dict `:` type($self) `,` type($start_dim) `,` type($end_dim) `->` type($result)";
}

def Torch_AtenDimOp : Torch_Op<"aten.dim", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::dim : (Tensor) -> (int)`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    Torch_IntType:$result
  );
  let assemblyFormat = "$self attr-dict `:` type($self) `->` type($result)";
  let hasFolder = 1;
}

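// `hasFolder` on `aten.dim`: when the operand type carries a statically known
// rank, the op can fold to that constant (an illustrative reading of the
// trait; the folder in the C++ sources is authoritative). For example:
//
//   %rank = torch.aten.dim %t : !torch.vtensor<[2,3,4],f32> -> !torch.int
//   (may fold to the constant integer 3)
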
def Torch_AtenSizeOp : Torch_Op<"aten.size", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::size : (Tensor) -> (int[])`";
  let arguments = (ins
    AnyTorchTensorType:$self
  );
  let results = (outs
    AnyTorchIntListType:$result
  );
  let assemblyFormat = "$self attr-dict `:` type($self) `->` type($result)";
  let hasCanonicalizer = 1;
}

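// `hasCanonicalizer` on `aten.size`: a typical pattern (assumed, not verified
// against this revision) rewrites `aten.size` of a tensor whose shape is fully
// known into a list of constant dimension sizes, enabling further folding of
// downstream list indexing and integer arithmetic.
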
def Torch_AtenGtIntOp : Torch_Op<"aten.gt.int", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::gt.int : (int, int) -> (bool)`";
  let arguments = (ins
    Torch_IntType:$a,
    Torch_IntType:$b
  );
  let results = (outs
    Torch_BoolType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
  let hasFolder = 1;
}

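// Example fold (illustrative): with constant operands, e.g. a = 4 and b = 2,
// `aten.gt.int` can fold to the bool constant true; `aten.ne.int` below folds
// analogously when both operands are known.
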
def Torch_AtenNeIntOp : Torch_Op<"aten.ne.int", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::ne.int : (int, int) -> (bool)`";
  let arguments = (ins
    Torch_IntType:$a,
    Torch_IntType:$b
  );
  let results = (outs
    Torch_BoolType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
  let hasFolder = 1;
}

def Torch_AtenAddIntOp : Torch_Op<"aten.add.int", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::add.int : (int, int) -> (int)`";
  let arguments = (ins
    Torch_IntType:$a,
    Torch_IntType:$b
  );
  let results = (outs
    Torch_IntType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
}

def Torch_AtenMulIntOp : Torch_Op<"aten.mul.int", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::mul.int : (int, int) -> (int)`";
  let arguments = (ins
    Torch_IntType:$a,
    Torch_IntType:$b
  );
  let results = (outs
    Torch_IntType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
}

def Torch_AtenAddFloatIntOp : Torch_Op<"aten.add.float_int", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::add.float_int : (float, int) -> (float)`";
  let arguments = (ins
    Torch_FloatType:$a,
    Torch_IntType:$b
  );
  let results = (outs
    Torch_FloatType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
}

def Torch_AtenMulFloatOp : Torch_Op<"aten.mul.float", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::mul.float : (float, float) -> (float)`";
  let arguments = (ins
    Torch_FloatType:$a,
    Torch_FloatType:$b
  );
  let results = (outs
    Torch_FloatType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
}

def Torch_AtenLtFloatIntOp : Torch_Op<"aten.lt.float_int", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::lt.float_int : (float, int) -> (bool)`";
  let arguments = (ins
    Torch_FloatType:$a,
    Torch_IntType:$b
  );
  let results = (outs
    Torch_BoolType:$result
  );
  let assemblyFormat = "$a `,` $b attr-dict `:` type($a) `,` type($b) `->` type($result)";
}

def Torch_Aten__Is__Op : Torch_Op<"aten.__is__", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::__is__ : (t1, t2) -> (bool)`";
  let arguments = (ins
    AnyTorchType:$self,
    AnyTorchType:$obj
  );
  let results = (outs
    Torch_BoolType:$result
  );
  let assemblyFormat = "$self `,` $obj attr-dict `:` type($self) `,` type($obj) `->` type($result)";
  let hasFolder = 1;
}

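// `hasFolder` on `aten.__is__`: identity comparison. A common fold (assumed
// behavior for this revision) resolves comparisons against a none value when
// the other operand's type already proves it can or cannot be None, e.g. a
// non-optional tensor compared with none folds to false.
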
def Torch_AtenLenTOp : Torch_Op<"aten.len.t", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::len.t : (t[]) -> (int)`";
  let arguments = (ins
    AnyTorchListType:$a
  );
  let results = (outs
    Torch_IntType:$result
  );
  let assemblyFormat = "$a attr-dict `:` type($a) `->` type($result)";
  let hasFolder = 1;
  let hasCanonicalizer = 1;
}

def Torch_Aten__Getitem__TOp : Torch_Op<"aten.__getitem__.t", [
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::__getitem__.t : (t[], int) -> (t)`";
  let arguments = (ins
    AnyTorchListType:$list,
    Torch_IntType:$idx
  );
  let results = (outs
    AnyTorchType:$result
  );
  let assemblyFormat = "$list `,` $idx attr-dict `:` type($list) `,` type($idx) `->` type($result)";
  let hasCanonicalizer = 1;
}

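// `hasCanonicalizer` on `aten.__getitem__.t`: when the list operand comes from
// a local list construction with known elements and the index is a constant,
// the access can be replaced by the corresponding element (an assumed
// canonicalization pattern; the registered patterns define the exact
// conditions).
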
def Torch_Aten_SetItemTOp : Torch_Op<"aten._set_item.t", [
    AllowsTypeRefinement
  ]> {
  let summary = "Generated op for `aten::_set_item.t : (t[], int, t) -> (t[])`";
  let arguments = (ins
    AnyTorchListType:$l,
    Torch_IntType:$idx,
    AnyTorchType:$el
  );
  let results = (outs
    AnyTorchListType:$result
  );
  let assemblyFormat = "$l `,` $idx `,` $el attr-dict `:` type($l) `,` type($idx) `,` type($el) `->` type($result)";
}