mirror of https://github.com/llvm/torch-mlir
Apply the trait NoMemoryEffect to most ReadOnly ops
parent 0913b967ac
commit 7e30ef798b
File diff suppressed because it is too large.
@@ -242,6 +242,7 @@ def emit_op(
     has_folder: bool = False,
     has_canonicalizer: bool = False,
     has_verifier: bool = False,
+    has_memory_effects: bool = False,
 ):
     """Main entry point for op emission.
@@ -257,6 +258,12 @@ def emit_op(
         traits += ["HasValueSemantics"]
     if operator.is_readonly():
         traits += ["ReadOnly"]
+    # If a ReadOnly op has no returns, it is likely to have side effects,
+    # e.g. `prim.RaiseException` and `prim.Print`.
+    # Besides ops with no returned values, there may be other ReadOnly ops with memory effects.
+    # Such ops can be emitted with `has_memory_effects=True` to avoid this trait.
+    if operator.is_readonly() and len(operator.returns) != 0 and not has_memory_effects:
+        traits += ["NoMemoryEffect"]

     raw_emit_op(
         operator,
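To make the new rule concrete, here is a self-contained sketch of the trait decision in isolation. `_Operator` is a stand-in for the JIT operator registry type that `emit_op` actually receives (its real shape is an assumption here); only the two `if` checks in `select_traits` mirror the diff, everything else is scaffolding.

from dataclasses import dataclass, field
from typing import List

@dataclass
class _Operator:
    # Stand-in for the registry entry consumed by emit_op (assumed shape).
    returns: List[str] = field(default_factory=list)
    readonly: bool = True

    def is_readonly(self) -> bool:
        return self.readonly

def select_traits(operator: _Operator, has_memory_effects: bool = False) -> List[str]:
    # Simplified mirror of the trait logic added by this commit.
    traits: List[str] = []
    if operator.is_readonly():
        traits += ["ReadOnly"]
    # Returnless ReadOnly ops (prim.Print, prim.RaiseException) keep their
    # implicit side effects; everything else defaults to NoMemoryEffect
    # unless the emitter opts out with has_memory_effects=True.
    if operator.is_readonly() and len(operator.returns) != 0 and not has_memory_effects:
        traits += ["NoMemoryEffect"]
    return traits

# prim.Print-like op: readonly, no returns -> stays effectful.
assert select_traits(_Operator(returns=[])) == ["ReadOnly"]
# Ordinary pure op: readonly with a return -> NoMemoryEffect.
assert select_traits(_Operator(returns=["Tensor"])) == ["ReadOnly", "NoMemoryEffect"]
# Explicit opt-out for a readonly op that still touches external state.
assert select_traits(_Operator(returns=["Tensor"]), has_memory_effects=True) == ["ReadOnly"]

The design choice here is that "has at least one result" serves as a cheap proxy for purity, with `has_memory_effects` as the manual escape hatch for the exceptions.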
@@ -1557,7 +1557,6 @@ func.func @torch.prim.unchecked_cast$derefine(%arg0: !torch.list<int>) -> !torch

 // CHECK-LABEL: func.func @torch.aten.Int.Tensor(
 // CHECK-SAME: %[[NUM:.*]]: !torch.int) -> !torch.int {
-// CHECK: %[[T:.*]] = torch.prim.NumToTensor.Scalar %[[NUM]] : !torch.int -> !torch.vtensor<[],si64>
 // CHECK: return %[[NUM]] : !torch.int
 func.func @torch.aten.Int.Tensor(%arg0: !torch.int) -> !torch.int {
   %tensor = torch.prim.NumToTensor.Scalar %arg0: !torch.int -> !torch.vtensor<[],si64>
@@ -1585,7 +1584,6 @@ func.func @torch.aten.Int.float() -> !torch.int {

 // CHECK-LABEL: func.func @torch.aten.Float.Tensor(
 // CHECK-SAME: %[[NUM:.*]]: !torch.float) -> !torch.float {
-// CHECK: %[[T:.*]] = torch.prim.NumToTensor.Scalar %[[NUM]] : !torch.float -> !torch.vtensor<[],f64>
 // CHECK: return %[[NUM]] : !torch.float
 func.func @torch.aten.Float.Tensor(%arg0: !torch.float) -> !torch.float {
   %tensor = torch.prim.NumToTensor.Scalar %arg0: !torch.float -> !torch.vtensor<[],f64>
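The two test hunks above are the observable payoff of the trait: after `aten.Int.Tensor` / `aten.Float.Tensor` fold to their scalar operand, the leftover `torch.prim.NumToTensor.Scalar` now carries NoMemoryEffect and can be erased as trivially dead, so the CHECK lines for it are removed. A toy model of that dead-code-elimination decision follows (made-up op records, not the real MLIR bindings):

from dataclasses import dataclass
from typing import List, Set

@dataclass
class Op:
    name: str
    results: List[str]       # SSA values this op defines
    operands: List[str]      # SSA values this op uses
    no_memory_effect: bool   # the trait this commit applies

def dce(ops: List[Op], live: Set[str]) -> List[Op]:
    # Erase ops whose results are unused, but only if they are effect-free.
    kept: List[Op] = []
    for op in reversed(ops):  # walk bottom-up so uses are recorded before defs
        if any(r in live for r in op.results) or not op.no_memory_effect:
            kept.append(op)
            live.update(op.operands)
    return list(reversed(kept))

# After the fold, only %arg0 is returned; the tensor result is dead.
num_to_tensor = Op("torch.prim.NumToTensor.Scalar", ["%tensor"], ["%arg0"],
                   no_memory_effect=True)
assert dce([num_to_tensor], live={"%arg0"}) == []               # erased, CHECK removed
num_to_tensor.no_memory_effect = False                          # the old behavior
assert dce([num_to_tensor], live={"%arg0"}) == [num_to_tensor]  # must be kept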