diff --git a/include/torch-mlir/Conversion/Passes.td b/include/torch-mlir/Conversion/Passes.td index a45b64475..086578a22 100644 --- a/include/torch-mlir/Conversion/Passes.td +++ b/include/torch-mlir/Conversion/Passes.td @@ -93,7 +93,7 @@ def ConvertTorchToLinalg : Pass<"convert-torch-to-linalg", "FuncOp"> { 4. All this code operates on ranked tensors, for which using individual SSA values for sizes (rather than a "shape type") seems to work really well at this level of abstraction based on prior experience - in IREE. (unranked code tends to benefit from having a discrete + in other projects. (unranked code tends to benefit from having a discrete "shape type" to model shapes). We will see if we end up needing something like `shape.assuming`, but for diff --git a/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py b/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py index 694840a1b..d3b81fd48 100644 --- a/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py +++ b/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py @@ -88,7 +88,7 @@ LOWERING_PIPELINE = ",".join([ class RefBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend): - """Main entry-point for the backend.""" + """Main entry-point for the reference backend.""" def __init__(self): super().__init__() @@ -102,10 +102,8 @@ class RefBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend): imported_module: The MLIR module consisting of funcs in the torch dialect. Returns: - An opaque, backend specific module object that can be passed to load. - The object may actually be something more specific to the backend (i.e. - for IREE, it is a serialized VM flatbuffer) but the contract is that - it is operated on by methods on this class. + An opaque, backend-specific compiled artifact object that can be + passed to `load`. """ with imported_module.context: pm = PassManager.parse(LOWERING_PIPELINE)