mirror of https://github.com/llvm/torch-mlir
Blacklist _convolution op (#1048)
* Blacklist _convolution op in LTC
* Removed duplicate Torch_AtenSelectScatterOp instance from autogen .td
* Removed duplicate Torch_AtenSliceScatterOp instance from autogen .td

parent 47bb38d180
commit cec74b8d37
@@ -1,13 +1,14 @@
 blacklist:
 # List of unsupported ops in LTC autogen because of some error
+- _index_put_impl_ # Error: TODO not sure if there are other valid types to handle here
 - empty_like # Error: TODO add support for type BaseType(name=<BaseTy.MemoryFormat: 12>)
 - index.Tensor # Error: TODO not sure if there are other valid types to handle here
 - index_put # Error: TODO not sure if there are other valid types to handle here
 - index_put_ # Error: TODO not sure if there are other valid types to handle here
-- _index_put_impl_ # Error: TODO not sure if there are other valid types to handle here
 - stack # Error: TODO not sure if there are other valid types to handle here
 
 # Additional ops which autogen is supported for but don't compile yet
+- _convolution
 - detach
 - item
 - size
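
The list above is plain YAML read by the LTC op autogen step; ops under blacklist: are skipped entirely. A minimal sketch of that flow, assuming the file is parsed with PyYAML (the function names and call shape below are illustrative, not the actual torch-mlir build script):

    # Minimal sketch, not the actual autogen code: load the YAML blacklist and
    # skip code generation for any op listed in it.
    import yaml

    def load_blacklist(path):
        with open(path) as f:
            config = yaml.safe_load(f)
        # Inline "# Error: ..." comments are dropped by the YAML parser,
        # leaving only the op names.
        return set(config.get("blacklist") or [])

    def should_generate(op_name, blacklist):
        # After this change, "_convolution" is blacklisted, so autogen skips it
        # instead of producing a lowering that does not compile yet.
        return op_name not in blacklist

With the file above, should_generate("_convolution", load_blacklist(path)) now returns False.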
@@ -181,6 +181,11 @@ TOSA_PASS_SET = {
 }
 
 LTC_XFAIL_SET = {
+    "_Convolution2DAllFalseModule_basic",
+    "_Convolution2DBenchmarkModule_basic",
+    "_Convolution2DCudnnModule_basic",
+    "_Convolution2DDeterministicModule_basic",
+    "_Convolution2DTF32Module_basic",
     "AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule_basic",
     "AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic",
     "AddIntModule_basic",
@@ -5508,32 +5508,6 @@ def Torch_AtenSelectIntOp : Torch_Op<"aten.select.int", [
   }];
 }
 
-def Torch_AtenSelectScatterOp : Torch_Op<"aten.select_scatter", [
-    AllowsTypeRefinement,
-    HasValueSemantics,
-    ReadOnly
-  ]> {
-  let summary = "Generated op for `aten::select_scatter : (Tensor, Tensor, int, int) -> (Tensor)`";
-  let arguments = (ins
-    AnyTorchTensorType:$self,
-    AnyTorchTensorType:$src,
-    Torch_IntType:$dim,
-    Torch_IntType:$index
-  );
-  let results = (outs
-    AnyTorchTensorType:$result
-  );
-  let hasCustomAssemblyFormat = 1;
-  let extraClassDefinition = [{
-    ParseResult AtenSelectScatterOp::parse(OpAsmParser &parser, OperationState &result) {
-      return parseDefaultTorchOp(parser, result, 4, 1);
-    }
-    void AtenSelectScatterOp::print(OpAsmPrinter &printer) {
-      printDefaultTorchOp(printer, *this, 4, 1);
-    }
-  }];
-}
-
 def Torch_AtenSizeIntOp : Torch_Op<"aten.size.int", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -5989,34 +5963,6 @@ def Torch_AtenSliceTensorOp : Torch_Op<"aten.slice.Tensor", [
   }];
 }
 
-def Torch_AtenSliceScatterOp : Torch_Op<"aten.slice_scatter", [
-    AllowsTypeRefinement,
-    HasValueSemantics,
-    ReadOnly
-  ]> {
-  let summary = "Generated op for `aten::slice_scatter : (Tensor, Tensor, int, int?, int?, int) -> (Tensor)`";
-  let arguments = (ins
-    AnyTorchTensorType:$self,
-    AnyTorchTensorType:$src,
-    Torch_IntType:$dim,
-    AnyTorchOptionalIntType:$start,
-    AnyTorchOptionalIntType:$end,
-    Torch_IntType:$step
-  );
-  let results = (outs
-    AnyTorchTensorType:$result
-  );
-  let hasCustomAssemblyFormat = 1;
-  let extraClassDefinition = [{
-    ParseResult AtenSliceScatterOp::parse(OpAsmParser &parser, OperationState &result) {
-      return parseDefaultTorchOp(parser, result, 6, 1);
-    }
-    void AtenSliceScatterOp::print(OpAsmPrinter &printer) {
-      printDefaultTorchOp(printer, *this, 6, 1);
-    }
-  }];
-}
-
 def Torch_AtenLenTensorOp : Torch_Op<"aten.len.Tensor", [
     AllowsTypeRefinement,
     HasValueSemantics,
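
For reference, the two ops whose duplicate generated definitions are deleted above correspond to PyTorch's public select_scatter and slice_scatter. The snippet below (tensor shapes chosen only for illustration) shows their semantics, matching the (Tensor, Tensor, int, int) and (Tensor, Tensor, int, int?, int?, int) signatures in the op summaries:

    import torch

    base = torch.zeros(3, 4)

    # aten::select_scatter : (Tensor, Tensor, int, int) -> (Tensor)
    # embeds `row` into `base` at index 1 along dim 0, i.e. replaces row 1
    row = torch.ones(4)
    out_select = torch.select_scatter(base, row, dim=0, index=1)

    # aten::slice_scatter : (Tensor, Tensor, int, int?, int?, int) -> (Tensor)
    # embeds `cols` into the slice base[:, 0:2]
    cols = torch.ones(3, 2)
    out_slice = torch.slice_scatter(base, cols, dim=1, start=0, end=2, step=1)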
@@ -443,7 +443,6 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)")
     emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)")
     emit("aten::select.int : (Tensor, int, int) -> (Tensor)")
-    emit("aten::select_scatter : (Tensor, Tensor, int, int) -> (Tensor)")
     emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True)
     emit("aten::stack : (Tensor[], int) -> (Tensor)")
     emit("aten::sum : (Tensor, int?) -> (Tensor)")
@@ -462,7 +461,6 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
     emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
     emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
-    emit("aten::slice_scatter : (Tensor, Tensor, int, int?, int?, int) -> (Tensor)")
     emit("aten::len.Tensor : (Tensor) -> (int)")
     emit("aten::cpu : (Tensor) -> (Tensor)")
     emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")