Remove last mentions of IREE.

pull/346/head
Sean Silva 2021-10-01 17:27:42 +00:00
parent 9fc059e948
commit 5917f1dc47
2 changed files with 4 additions and 6 deletions

@@ -93,7 +93,7 @@ def ConvertTorchToLinalg : Pass<"convert-torch-to-linalg", "FuncOp"> {
     4. All this code operates on ranked tensors, for which using individual
        SSA values for sizes (rather than a "shape type") seems to
        work really well at this level of abstraction based on prior experience
-       in IREE. (unranked code tends to benefit from having a discrete
+       in other projects. (unranked code tends to benefit from having a discrete
        "shape type" to model shapes).
     We will see if we end up needing something like `shape.assuming`, but for
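
To make the design note above concrete, here is a hypothetical illustration of "individual SSA values for sizes". It is not part of this commit; the IR is kept in a Python string constant in the spirit of the repository's Python tooling, and the exact op spellings (tensor.dim, linalg.init_tensor, arith.constant) are assumptions about the dialects of that era.

# Hypothetical sketch, not from the commit: for a ranked tensor<?x?xf32>,
# each dynamic dimension becomes its own SSA index value (one per dim),
# instead of materializing a single "shape type" value for the whole tensor.
RANKED_SIZES_EXAMPLE_IR = """
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
  %d1 = tensor.dim %arg0, %c1 : tensor<?x?xf32>
  // Later ops consume %d0 and %d1 directly, e.g. to create the result
  // tensor of a linalg op with matching dynamic sizes.
  %init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
"""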

@@ -88,7 +88,7 @@ LOWERING_PIPELINE = ",".join([
 class RefBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend):
-    """Main entry-point for the backend."""
+    """Main entry-point for the reference backend."""
     def __init__(self):
         super().__init__()
@@ -102,10 +102,8 @@ class RefBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend):
           imported_module: The MLIR module consisting of funcs in the torch
             dialect.
         Returns:
-          An opaque, backend specific module object that can be passed to load.
-          The object may actually be something more specific to the backend (i.e.
-          for IREE, it is a serialized VM flatbuffer) but the contract is that
-          it is operated on by methods on this class.
+          An opaque, backend specific compiled artifact object that can be
+          passed to `load`.
         """
         with imported_module.context:
             pm = PassManager.parse(LOWERING_PIPELINE)
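
For context, here is a hypothetical usage sketch of the compile/load contract described in the docstring above. It is not part of this commit; the helper name, the numpy input, and the `forward` entry-point name are assumptions for illustration, while `compile` and `load` come from the backend class itself.

import numpy as np

# Hypothetical sketch, not from the commit: drive the reference backend
# defined in this file end to end on an already-imported MLIR module.
def run_with_refbackend(imported_module, example_input: np.ndarray):
    backend = RefBackendLinalgOnTensorsBackend()
    # compile() runs LOWERING_PIPELINE over the module and returns the
    # opaque, backend-specific compiled artifact described above.
    compiled = backend.compile(imported_module)
    # load() turns that opaque artifact into an invokable object.
    loaded = backend.load(compiled)
    # 'forward' is an assumed function name in the compiled module.
    return loaded.forward(example_input)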