From cff144b3acf9a34835da313bf87b711ec21c6294 Mon Sep 17 00:00:00 2001
From: Peiming Liu
Date: Wed, 8 May 2024 21:18:17 -0700
Subject: [PATCH] =?UTF-8?q?[sparse]=20fix=20double=20free=20due=20to=20inc?=
 =?UTF-8?q?ompatibility=20between=20buffer-deallo=E2=80=A6=20(#3303)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

…cation and sparse tensors.

**NOTE**: This PR _dodges_ the issue in the buffer-deallocation pass
instead of resolving it: reallocs are now expanded before bufferization,
and the sparse helper methods are inlined only after buffer deallocation
has run. In the future, we need to fix the underlying bug in the
buffer-deallocation pass when it handles code generated by the sparse
compiler.
---
 .../linalg_on_tensors_backends/refbackend.py |  7 +++--
 test/python/fx_importer/sparse_test.py       | 31 ++++++++++++-------
 2 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py b/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
index 08e8ff64d..8935a2a06 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
@@ -155,7 +155,8 @@ LOWERING_PIPELINE = (
     "sparse-assembler{direct-out}",
     "sparsification-and-bufferization",
     "sparse-storage-specifier-to-llvm",
-    "inline",  # inline sparse helper methods where useful
+    # Buffer deallocation pass does not know how to handle realloc.
+    "func.func(expand-realloc)",
     # Bufferize.
     "func.func(scf-bufferize)",
     "func.func(tm-tensor-bufferize)",
@@ -167,6 +168,9 @@ LOWERING_PIPELINE = (
     "func.func(tensor-bufferize)",
     "func.func(finalizing-bufferize)",
     "func.func(buffer-deallocation)",
+    # Buffer-deallocation does not work with the inlined code generated
+    # by sparse tensor dialect.
+    "inline",  # inline sparse helper methods where useful
     # Munge to make it ExecutionEngine compatible.
     # Specifically, we rewrite calling convention boundaries to be in terms
     # of unranked memref, and we rewrite the return to actually be a
@@ -180,7 +184,6 @@ LOWERING_PIPELINE = (
     "func.func(tm-tensor-to-loops)",
     "func.func(refback-munge-memref-copy)",
     "func.func(convert-linalg-to-loops)",
-    "func.func(expand-realloc)",
     "func.func(lower-affine)",
     "convert-scf-to-cf",
     "func.func(refback-expand-ops-for-llvm)",
diff --git a/test/python/fx_importer/sparse_test.py b/test/python/fx_importer/sparse_test.py
index 95a859359..b84805163 100644
--- a/test/python/fx_importer/sparse_test.py
+++ b/test/python/fx_importer/sparse_test.py
@@ -364,26 +364,30 @@ def test_sparse_SpMM():
 # CHECK-LABEL: test_sparse_eltwise
 # CHECK: #[[$CSRD:.*]] = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense), posWidth = 64, crdWidth = 64 }>
 # CHECK: func.func @main(
-# CHECK-SAME: %[[A:.*]]: !torch.vtensor<[8,4,2],f32,#[[$CSRD]]>) -> !torch.vtensor<[8,4,2],f32,#[[$CSRD]]> {
-# CHECK: %[[R:.*]] = torch.aten.neg %[[A]] : !torch.vtensor<[8,4,2],f32,#[[$CSRD]]> -> !torch.vtensor<[8,4,2],f32,#[[$CSRD]]>
-# CHECK: return %[[R]] : !torch.vtensor<[8,4,2],f32,#[[$CSRD]]>
+# CHECK-SAME: %[[A:.*]]: !torch.vtensor<[4,2,2],f32,#[[$CSRD]]>) -> !torch.vtensor<[4,2,2],f32,#[[$CSRD]]> {
+# CHECK: %[[R:.*]] = torch.aten.neg %[[A]] : !torch.vtensor<[4,2,2],f32,#[[$CSRD]]> -> !torch.vtensor<[4,2,2],f32,#[[$CSRD]]>
+# CHECK: return %[[R]] : !torch.vtensor<[4,2,2],f32,#[[$CSRD]]>
 # CHECK: }
 # CHECK: #[[$BCSR:.*]] = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : batch, d1 : dense, d2 : compressed), posWidth = 64, crdWidth = 64 }>
 # CHECK: func.func @main(
-# CHECK-SAME: %[[A:.*]]: !torch.vtensor<[8,4,2],f32,#[[$BCSR]]>) -> !torch.vtensor<[8,4,2],f32,#[[$BCSR]]> {
-# CHECK: %[[R:.*]] = torch.aten.neg %[[A]] : !torch.vtensor<[8,4,2],f32,#[[$BCSR]]> -> !torch.vtensor<[8,4,2],f32,#[[$BCSR]]>
-# CHECK: return %[[R]] : !torch.vtensor<[8,4,2],f32,#[[$BCSR]]>
+# CHECK-SAME: %[[A:.*]]: !torch.vtensor<[4,2,2],f32,#[[$BCSR]]>) -> !torch.vtensor<[4,2,2],f32,#[[$BCSR]]> {
+# CHECK: %[[R:.*]] = torch.aten.neg %[[A]] : !torch.vtensor<[4,2,2],f32,#[[$BCSR]]> -> !torch.vtensor<[4,2,2],f32,#[[$BCSR]]>
+# CHECK: return %[[R]] : !torch.vtensor<[4,2,2],f32,#[[$BCSR]]>
 # CHECK: }
 #
 # CHECK: torch.sparse
-# CHECK: tensor(crow_indices=tensor([ 0, 4, 8, 12, 16, 20, 24, 28, 32]),
-# CHECK: col_indices=tensor([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1,
-# CHECK: 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]),
+# CHECK: tensor(crow_indices=tensor([0, 2, 4, 6, 8]),
+# CHECK: col_indices=tensor([0, 1, 0, 1, 0, 1, 0, 1]),
 # CHECK: values=tensor({{\[}}[ -1., -2.],
 # ...
-# CHECK: [-63., -64.]{{\]}}), size=(8, 4, 2), nnz=32,
+# CHECK: [-15., -16.]{{\]}}), size=(4, 2, 2), nnz=8,
 # CHECK: layout=torch.sparse_csr)
+#
 # CHECK: torch.mlir
+# CHECK: [0 2 4 6 8]
+# CHECK: [0 1 0 1 0 1 0 1]
+# CHECK: [ -1. -2. -3. -4. -5. -6. -7. -8. -9. -10. -11. -12. -13. -14.
+# CHECK: -15. -16.]
 # CHECK: torch.mlir.batch
 #
 def test_sparse_eltwise():
@@ -396,7 +400,7 @@ def test_sparse_eltwise():
     net = EltNet()
 
     dense_input = torch.reshape(
-        torch.arange(1, 65, dtype=torch.float32), shape=(8, 4, 2)
+        torch.arange(1, 17, dtype=torch.float32), shape=(4, 2, 2)
     )
 
     # This yields a plain CSR with dense **sub**tensor
@@ -411,12 +415,15 @@ def test_sparse_eltwise():
     # Run it with PyTorch torch.sparse and with TORCH-MLIR sparse_jit.
     res1 = net(sparse_input)
+    res2 = sparse_jit(net, sparse_input)
     # TODO: make these work
-    # res2 = sparse_jit(net, sparse_input)
     # res3 = sparse_jit(net, batch_input)
     print("torch.sparse")
     print(res1)
     print("torch.mlir")
+    print(res2[0])
+    print(res2[1])
+    print(res2[2])
     print("torch.mlir.batch")
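
For quick reference, below is a minimal sketch of the pass ordering this
patch establishes in `LOWERING_PIPELINE`. The name `LOWERING_PIPELINE_SKETCH`
and the elided entries are illustrative only, not part of the patch; the
authoritative ordering is in the hunks above.

```python
# Sketch of the ordering constraints introduced by this patch
# (refbackend.py). Comments marked "..." stand in for the unchanged
# passes between the edited entries.
LOWERING_PIPELINE_SKETCH = (
    "sparse-assembler{direct-out}",
    "sparsification-and-bufferization",
    "sparse-storage-specifier-to-llvm",
    # (1) Expand memref.realloc *before* bufferization/deallocation,
    #     since buffer-deallocation does not know how to handle realloc.
    "func.func(expand-realloc)",
    # ... bufferization passes (scf, tm-tensor, linalg, tensor, ...) ...
    "func.func(buffer-deallocation)",
    # (2) Inline the sparse helper methods only *after* deallocation;
    #     running buffer-deallocation over the inlined sparse-tensor
    #     code is what produced the double free.
    "inline",
    # ... remaining lowering down to the ExecutionEngine ...
)
```

With this ordering, buffer-deallocation never sees a realloc or the inlined
sparse-tensor helper code, which sidesteps the double free until the pass
itself is fixed.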