mirror of https://github.com/llvm/torch-mlir
Added support for native_batch_norm_backward (#890)
parent bfe8ff4b42
commit a635fd2287
@@ -7514,6 +7514,40 @@ def Torch_AtenEmbeddingDenseBackwardOp : Torch_Op<"aten.embedding_dense_backward
   }];
 }
 
+def Torch_AtenNativeBatchNormBackwardOp : Torch_Op<"aten.native_batch_norm_backward", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::native_batch_norm_backward : (Tensor, Tensor, Tensor?, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, bool[]) -> (Tensor, Tensor, Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$grad_out,
+    AnyTorchTensorType:$input,
+    AnyTorchOptionalTensorType:$weight,
+    AnyTorchOptionalTensorType:$running_mean,
+    AnyTorchOptionalTensorType:$running_var,
+    AnyTorchOptionalTensorType:$save_mean,
+    AnyTorchOptionalTensorType:$save_invstd,
+    Torch_BoolType:$train,
+    Torch_FloatType:$eps,
+    AnyTorchListOfTorchBoolType:$output_mask
+  );
+  let results = (outs
+    AnyTorchTensorType:$result0,
+    AnyTorchTensorType:$result1,
+    AnyTorchTensorType:$result2
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenNativeBatchNormBackwardOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 10, 3);
+    }
+    void AtenNativeBatchNormBackwardOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 10, 3);
+    }
+  }];
+}
+
 def Torch_PrimLayoutOp : Torch_Op<"prim.layout", [
     AllowsTypeRefinement,
     HasValueSemantics,
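
For reference, the 10-operand / 3-result arity hard-coded in parseDefaultTorchOp(parser, result, 10, 3) matches the ATen signature quoted in the op summary. Below is a minimal Python sketch of calling that ATen overload directly; the tensor shapes and the 1e-5 eps value are illustrative, not taken from this commit.

import torch

# Arbitrary NCHW shapes with C = 4 channels.
N, C, H, W = 2, 4, 8, 8
grad_out = torch.randn(N, C, H, W)
x = torch.randn(N, C, H, W)
weight = torch.ones(C)
running_mean = torch.zeros(C)
running_var = torch.ones(C)
# Saved statistics from the forward pass (biased variance, inverted std).
save_mean = x.mean(dim=(0, 2, 3))
save_invstd = (x.var(dim=(0, 2, 3), unbiased=False) + 1e-5).rsqrt()

# 10 operands in, 3 tensors out: grad_input, grad_weight, grad_bias.
grad_input, grad_weight, grad_bias = torch.ops.aten.native_batch_norm_backward(
    grad_out, x, weight, running_mean, running_var,
    save_mean, save_invstd,
    True,                 # train
    1e-5,                 # eps
    [True, True, True],   # output_mask: which of the three gradients to compute
)
print(grad_input.shape, grad_weight.shape, grad_bias.shape)
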
@@ -7890,4 +7924,3 @@ def Torch_QuantizedLinearOp : Torch_Op<"quantized.linear", [
     }
   }];
 }
-
@@ -538,6 +538,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
     emit("aten::native_layer_norm_backward : (Tensor, Tensor, int[], Tensor, Tensor, Tensor?, Tensor?, bool[]) -> (Tensor, Tensor, Tensor)")
     emit("aten::embedding_dense_backward : (Tensor, Tensor, int, int, bool) -> (Tensor)")
+    emit("aten::native_batch_norm_backward : (Tensor, Tensor, Tensor?, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, bool[]) -> (Tensor, Tensor, Tensor)")
 
     # ==========================================================================
     # `prim::` namespace.
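
The single emit(...) line added above is what drives regeneration of the ODS definition shown in the first hunk. The sketch below is illustrative only and does not reproduce the actual torch_ods_gen internals; it just shows roughly how the registered JIT operator name corresponds to the generated TableGen def name and op mnemonic seen earlier.

def ods_names(qualified_name: str):
    """Hypothetical helper: derive the names used in the generated .td file."""
    namespace, unqualified = qualified_name.split("::")
    camel = "".join(part.capitalize() for part in unqualified.split("_"))
    td_def = "Torch_" + namespace.capitalize() + camel + "Op"   # Torch_AtenNativeBatchNormBackwardOp
    mnemonic = namespace + "." + unqualified                    # aten.native_batch_norm_backward
    return td_def, mnemonic

print(ods_names("aten::native_batch_norm_backward"))
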