From bb52a460cb101ed63d73357953fbbb544cd62c59 Mon Sep 17 00:00:00 2001
From: Ashay Rane
Date: Mon, 16 May 2022 12:54:35 -0700
Subject: [PATCH] mlir: bump llvm tag to 5380e3 (#856)

In addition to updating the llvm-project submodule, this patch also:

1. updates shape functions and tests so that `func` and `call` operations
   refer to the `func` dialect
2. avoids duplicate registration of dialects
---
 .../test/tmtensor/bufferize.mlir | 16 +-
 .../test/tmtensor/canonicalize.mlir | 4 +-
 .../test/tmtensor/convert_to_loops.mlir | 48 +-
 .../test/tmtensor/invalid.mlir | 40 +-
 externals/llvm-project | 2 +-
 lib/Dialect/Torch/Transforms/ShapeLibrary.cpp | 796 +++++++++---------
 .../importer/jit_ir/csrc/module_builder.cpp | 1 -
 test/Conversion/TorchToLinalg/basic.mlir | 50 +-
 .../Conversion/TorchToLinalg/elementwise.mlir | 20 +-
 test/Conversion/TorchToLinalg/flatten.mlir | 20 +-
 test/Conversion/TorchToLinalg/unsqueeze.mlir | 20 +-
 test/Conversion/TorchToSCF/basic.mlir | 24 +-
 test/Conversion/TorchToStd/basic.mlir | 76 +-
 test/Conversion/TorchToTosa/basic.mlir | 168 ++--
 .../GlobalizeObjectGraph/free-functions.mlir | 12 +-
 .../Torch/GlobalizeObjectGraph/methods.mlir | 12 +-
 .../module-uses-error.mlir | 2 +-
 .../GlobalizeObjectGraph/module-uses.mlir | 12 +-
 .../multiple-instances-error.mlir | 4 +-
 ...ltiple-instances-multiple-module-args.mlir | 16 +-
 .../multiple-instances.mlir | 10 +-
 .../GlobalizeObjectGraph/visibility.mlir | 4 +-
 .../Torch/adjust-calling-conventions.mlir | 28 +-
 test/Dialect/Torch/canonicalize.mlir | 456 +++++-----
 test/Dialect/Torch/decompose-complex-ops.mlir | 186 ++--
 .../Torch/drop-shape-calculations.mlir | 4 +-
 test/Dialect/Torch/inline-global-slots.mlir | 4 +-
 .../Torch/maximize-value-semantics.mlir | 72 +-
 test/Dialect/Torch/ops.mlir | 52 +-
 .../prepare-for-globalize-object-graph.mlir | 8 +-
 .../Torch/reduce-op-variants-error.mlir | 4 +-
 test/Dialect/Torch/reduce-op-variants.mlir | 44 +-
 test/Dialect/Torch/refine-public-return.mlir | 18 +-
 test/Dialect/Torch/refine-types-branch.mlir | 30 +-
 test/Dialect/Torch/refine-types-ops.mlir | 92 +-
 test/Dialect/Torch/refine-types.mlir | 56 +-
 .../Torch/reify-shape-calculations.mlir | 58 +-
 .../Torch/simplify-shape-calculations.mlir | 64 +-
 .../finalizing-backend-type-conversion.mlir | 24 +-
 .../func-backend-type-conversion.mlir | 46 +-
 test/Dialect/TorchConversion/ops.mlir | 4 +-
 ...fy-invariants-before-backend-lowering.mlir | 6 +-
 ...fy-linalg-on-tensors-backend-contract.mlir | 8 +-
 .../verify-tosa-backend-contract.mlir | 8 +-
 test/RefBackend/insert-rng-globals.mlir | 4 +-
 .../RefBackend/munge-calling-conventions.mlir | 20 +-
 .../annotations/arg-tensor-type-bound.py | 2 +-
 .../functions-that-call-methods.py | 6 +-
 .../jit_ir/ivalue_import/functions.py | 4 +-
 .../jit_ir/ivalue_import/methods-derefine.py | 2 +-
 .../importer/jit_ir/ivalue_import/methods.py | 2 +-
 .../importer/jit_ir/ivalue_import/prim.py | 4 +-
 .../jit_ir/ivalue_import/submodules-select.py | 2 +-
 .../importer/jit_ir/node_import/classes.py | 2 +-
 .../importer/jit_ir/node_import/debug-info.py | 2 +-
 .../importer/jit_ir/node_import/dict.py | 4 +-
 .../function-block-arg-adjustment.py | 2 +-
 .../jit_ir/node_import/function-derefine.py | 6 +-
 test/python/importer/jit_ir/node_import/if.py | 2 +-
 .../importer/jit_ir/node_import/list.py | 2 +-
 .../importer/jit_ir/node_import/loop.py | 6 +-
 .../importer/jit_ir/node_import/prim.py | 26 +-
 .../importer/jit_ir/node_import/tuple.py | 8 +-
 .../importer/jit_ir/node_import/union.py | 2 +-
 64 files changed, 1368 insertions(+), 1369 deletions(-)
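Note for reviewers: the bumped llvm-project revision lies after the upstream
change that moved the `func`, `call`, and `return` operations into the `func`
dialect, which is why the MLIR assembly and FileCheck patterns below switch to
the dialect-prefixed spelling. A minimal sketch of the spelling change (the
function names here are illustrative, not taken from this patch):

    // Old spelling, accepted at the previous LLVM pin:
    //   func @unary(%arg0: tensor<4xf32>) -> tensor<4xf32> {
    //     %0 = call @identity(%arg0) : (tensor<4xf32>) -> tensor<4xf32>
    //     return %0 : tensor<4xf32>
    //   }
    // New spelling, used at the new pin: top-level functions are spelled
    // `func.func`, and calls inside them are spelled `func.call`. Bare
    // `return` remains valid inside a `func.func` body because `func` is the
    // default dialect there.
    func.func @identity(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      return %arg0 : tensor<4xf32>
    }
    func.func @unary(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      %0 = func.call @identity(%arg0) : (tensor<4xf32>) -> tensor<4xf32>
      return %0 : tensor<4xf32>
    }

The same rename is applied mechanically throughout the generated shape library
and the lit tests in the hunks below.
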
diff --git a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/bufferize.mlir b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/bufferize.mlir index 463ae39d6..7059524fe 100644 --- a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/bufferize.mlir +++ b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/bufferize.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-dialects-opt -split-input-file -tm-tensor-bufferize %s | FileCheck %s // ----- -// CHECK-LABEL: func @scan_1d_inclusive( +// CHECK-LABEL: func.func @scan_1d_inclusive( // CHECK-SAME: %[[IN_TENSOR:.*]]: tensor<128xi32>, %[[OUT_TENSOR:.*]]: tensor<128xi32>, // CHECK-SAME: %[[ACC_TENSOR:.*]]: tensor) -> (tensor<128xi32>, tensor) { // CHECK: %[[IN_MEMREF:.*]] = bufferization.to_memref %[[IN_TENSOR]] : memref<128xi32> @@ -16,7 +16,7 @@ // CHECK: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32> // CHECK: %[[ACC_TENSOR_NEW:.*]] = bufferization.to_tensor %[[ACC_MEMREF_NEW]] : memref // CHECK: return %[[OUT_TENSOR_NEW]], %[[ACC_TENSOR_NEW]] : tensor<128xi32>, tensor -func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tensor) -> (tensor<128xi32>, tensor) { +func.func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tensor) -> (tensor<128xi32>, tensor) { %ret_out, %ret_acc = tm_tensor.scan dimension(0) inclusive(true) ins(%in : tensor<128xi32>) outs(%out, %acc: tensor<128xi32>, tensor) { ^bb0(%arg0 : i32, %arg1 : i32): @@ -27,7 +27,7 @@ func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tenso } // ----- -// CHECK-LABEL: func @scan_1d_exclusive( +// CHECK-LABEL: func.func @scan_1d_exclusive( // CHECK-SAME: %[[IN_TENSOR:.*]]: tensor<128xi32>, %[[OUT_TENSOR:.*]]: tensor<128xi32>, // CHECK-SAME: %[[ACC_TENSOR:.*]]: tensor) -> (tensor<128xi32>, tensor) { // CHECK: %[[IN_MEMREF:.*]] = bufferization.to_memref %[[IN_TENSOR]] : memref<128xi32> @@ -44,7 +44,7 @@ func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tenso // CHECK: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32> // CHECK: %[[ACC_TENSOR_NEW:.*]] = bufferization.to_tensor %[[ACC_MEMREF_NEW]] : memref // CHECK: return %[[OUT_TENSOR_NEW]], %[[ACC_TENSOR_NEW]] : tensor<128xi32>, tensor -func @scan_1d_exclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tensor) -> (tensor<128xi32>, tensor) { +func.func @scan_1d_exclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tensor) -> (tensor<128xi32>, tensor) { %ret_out, %ret_acc = tm_tensor.scan dimension(0) inclusive(false) ins(%in : tensor<128xi32>) outs(%out, %acc: tensor<128xi32>, tensor) { ^bb0(%arg0 : i32, %arg1 : i32): @@ -55,7 +55,7 @@ func @scan_1d_exclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tenso } // ----- -// CHECK-LABEL: func @scatter_update_scalar_1D( +// CHECK-LABEL: func.func @scatter_update_scalar_1D( // CHECK-SAME: %[[ORIG_TENSOR:.*]]: tensor<8xi32>, // CHECK-SAME: %[[INDICES_TENSOR:.*]]: tensor<3x1xi32>, // CHECK-SAME: %[[UPDATES_TENSOR:.*]]: tensor<3xi32>) -> tensor<8xi32> { @@ -71,7 +71,7 @@ func @scan_1d_exclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tenso // CHECK: } // CHECK: %[[OUT_TENSOR:.*]] = bufferization.to_tensor %[[ORIG_MEMREF_NEW]] : memref<8xi32> // CHECK: return %[[OUT_TENSOR]] : tensor<8xi32> -func @scatter_update_scalar_1D( +func.func @scatter_update_scalar_1D( %original: tensor<8xi32>, %indices: tensor<3x1xi32>, %updates: tensor<3xi32>) -> 
tensor<8xi32> { %0 = tm_tensor.scatter unique_indices(true) @@ -83,7 +83,7 @@ func @scatter_update_scalar_1D( return %0 : tensor<8xi32> } -// CHECK-LABEL: func @scatter_add_scalar_1D( +// CHECK-LABEL: func.func @scatter_add_scalar_1D( // CHECK-SAME: %[[ORIG_TENSOR:.*]]: tensor<8xi32>, // CHECK-SAME: %[[INDICES_TENSOR:.*]]: tensor<3x1xi32>, // CHECK-SAME: %[[UPDATES_TENSOR:.*]]: tensor<3xi32>) -> tensor<8xi32> { @@ -101,7 +101,7 @@ func @scatter_update_scalar_1D( // CHECK: } // CHECK: %[[OUT_TENSOR:.*]] = bufferization.to_tensor %[[ORIG_MEMREF_NEW]] : memref<8xi32> // CHECK: return %[[OUT_TENSOR]] : tensor<8xi32> -func @scatter_add_scalar_1D( +func.func @scatter_add_scalar_1D( %original: tensor<8xi32>, %indices: tensor<3x1xi32>, %updates: tensor<3xi32>) -> tensor<8xi32> { %0 = tm_tensor.scatter unique_indices(true) diff --git a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/canonicalize.mlir b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/canonicalize.mlir index a226056c1..b3cb49b9a 100644 --- a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/canonicalize.mlir +++ b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/canonicalize.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-dialects-opt -canonicalize -split-input-file %s | FileCheck %s -// CHECK-LABEL: func @tensor.cast( -func @tensor.cast(%arg0: tensor<128xi32>) -> tensor<128xi32> { +// CHECK-LABEL: func.func @tensor.cast( +func.func @tensor.cast(%arg0: tensor<128xi32>) -> tensor<128xi32> { %init = linalg.init_tensor [128] : tensor<128xi32> %c0 = linalg.init_tensor [] : tensor diff --git a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/convert_to_loops.mlir b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/convert_to_loops.mlir index 1237cb870..6994ce3ad 100644 --- a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/convert_to_loops.mlir +++ b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/convert_to_loops.mlir @@ -1,6 +1,6 @@ // RUN: torch-mlir-dialects-opt -split-input-file -tm-tensor-to-loops %s | FileCheck %s -func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) { +func.func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) { %c0 = memref.alloc() : memref tm_tensor.scan dimension(0) inclusive(true) ins(%0 : memref<128xi32>) outs(%1, %c0 : memref<128xi32>, memref) { @@ -10,7 +10,7 @@ func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) { } return } -// CHECK-LABEL: func @scan_1d_inclusive +// CHECK-LABEL: func.func @scan_1d_inclusive // CHECK-SAME: %[[BUFI:[a-zA-Z0-9]+]] // CHECK-SAME: %[[BUFO:[a-zA-Z0-9]+]] // CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index @@ -33,7 +33,7 @@ func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) { // ----- -func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) { +func.func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) { %c0 = memref.alloc() : memref tm_tensor.scan dimension(0) inclusive(false) ins(%0 : memref<128xi32>) outs(%1, %c0 : memref<128xi32>, memref) { @@ -43,7 +43,7 @@ func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) { } return } -// CHECK-LABEL: func @scan_1d_exclusive +// CHECK-LABEL: func.func @scan_1d_exclusive // CHECK-SAME: %[[BUFI:[a-zA-Z0-9]+]] // CHECK-SAME: %[[BUFO:[a-zA-Z0-9]+]] // CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index @@ -66,7 +66,7 @@ func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) { // ----- -func 
@scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) { +func.func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) { %t0 = memref.alloc() : memref<32xi32> tm_tensor.scan dimension(0) inclusive(true) ins(%0 : memref<16x32xi32>) outs(%1, %t0 : memref<16x32xi32>, memref<32xi32>) { @@ -76,7 +76,7 @@ func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) { } return } -// CHECK-LABEL: func @scan_2d +// CHECK-LABEL: func.func @scan_2d // CHECK-SAME: %[[BUFI:[a-zA-Z0-9]+]] // CHECK-SAME: %[[BUFO:[a-zA-Z0-9]+]] // CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index @@ -102,7 +102,7 @@ func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) { // ----- -func @scatter_update_scalar_1D( +func.func @scatter_update_scalar_1D( %original: memref<8xi32>, %indices: memref<3x1xi32>, %updates: memref<3xi32>) { tm_tensor.scatter unique_indices(true) @@ -113,7 +113,7 @@ func @scatter_update_scalar_1D( } return } -// CHECK-LABEL: func @scatter_update_scalar_1D +// CHECK-LABEL: func.func @scatter_update_scalar_1D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -128,7 +128,7 @@ func @scatter_update_scalar_1D( // ----- -func @scatter_add_scalar_2D( +func.func @scatter_add_scalar_2D( %original: memref<4x3xi32>, %indices: memref<3x2xi32>, %updates: memref<3xi32>) { tm_tensor.scatter unique_indices(true) @@ -140,7 +140,7 @@ func @scatter_add_scalar_2D( } return } -// CHECK-LABEL: func @scatter_add_scalar_2D +// CHECK-LABEL: func.func @scatter_add_scalar_2D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -159,7 +159,7 @@ func @scatter_add_scalar_2D( // ----- -func @scatter_update_slice_2D( +func.func @scatter_update_slice_2D( %original: memref<4x3xi32>, %indices: memref<2x1xi32>, %updates: memref<2x3xi32>) { tm_tensor.scatter unique_indices(true) @@ -170,7 +170,7 @@ func @scatter_update_slice_2D( } return } -// CHECK: func @scatter_update_slice_2D +// CHECK: func.func @scatter_update_slice_2D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -189,7 +189,7 @@ func @scatter_update_slice_2D( // ----- -func @scatter_add_scalar_1D( +func.func @scatter_add_scalar_1D( %original: memref<8xi32>, %indices: memref<3x1xi32>, %updates: memref<3xi32>) { tm_tensor.scatter unique_indices(true) @@ -201,7 +201,7 @@ func @scatter_add_scalar_1D( } return } -// CHECK-LABEL: func @scatter_add_scalar_1D +// CHECK-LABEL: func.func @scatter_add_scalar_1D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -218,7 +218,7 @@ func @scatter_add_scalar_1D( // ----- -func @scatter_add_slice_2D( +func.func @scatter_add_slice_2D( %original: memref<4x3xi32>, %indices: memref<2x1xi32>, %updates: memref<2x3xi32>) { tm_tensor.scatter unique_indices(true) @@ -230,7 +230,7 @@ func @scatter_add_slice_2D( } return } -// CHECK: func @scatter_add_slice_2D +// CHECK: func.func @scatter_add_slice_2D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -248,7 +248,7 @@ func @scatter_add_slice_2D( // ----- -func @scatter_update_scalar_dynamic_1D( +func.func @scatter_update_scalar_dynamic_1D( %original: memref, %indices: memref, %updates: memref) { tm_tensor.scatter unique_indices(true) @@ -259,7 +259,7 @@ func @scatter_update_scalar_dynamic_1D( } return } -// CHECK-LABEL: 
func @scatter_update_scalar_dynamic_1D +// CHECK-LABEL: func.func @scatter_update_scalar_dynamic_1D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -274,7 +274,7 @@ func @scatter_update_scalar_dynamic_1D( // ----- -func @scatter_add_scalar_dynamic_2D( +func.func @scatter_add_scalar_dynamic_2D( %original: memref, %indices: memref, %updates: memref) { tm_tensor.scatter unique_indices(true) @@ -286,7 +286,7 @@ func @scatter_add_scalar_dynamic_2D( } return } -// CHECK-LABEL: func @scatter_add_scalar_dynamic_2D +// CHECK-LABEL: func.func @scatter_add_scalar_dynamic_2D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -305,7 +305,7 @@ func @scatter_add_scalar_dynamic_2D( // ----- -func @scatter_update_slice_dynamic_2D( +func.func @scatter_update_slice_dynamic_2D( %original: memref, %indices: memref, %updates: memref) { tm_tensor.scatter unique_indices(true) @@ -316,7 +316,7 @@ func @scatter_update_slice_dynamic_2D( } return } -// CHECK: func @scatter_update_slice_dynamic_2D +// CHECK: func.func @scatter_update_slice_dynamic_2D // CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]] // CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]] // CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]] @@ -333,7 +333,7 @@ func @scatter_update_slice_dynamic_2D( // ----- -func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, %arg2: memref<2x1x12xf32>) { +func.func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, %arg2: memref<2x1x12xf32>) { tm_tensor.scatter unique_indices(true) ins(%arg2, %arg1 : memref<2x1x12xf32>, memref<2x3xi32>) @@ -344,7 +344,7 @@ func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, return } -// CHECK-LABEL: func @scatter_partial_slices +// CHECK-LABEL: func.func @scatter_partial_slices // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]] // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]] // CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]] diff --git a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/invalid.mlir b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/invalid.mlir index 2beaf60d0..db343c6c3 100644 --- a/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/invalid.mlir +++ b/externals/llvm-external-projects/torch-mlir-dialects/test/tmtensor/invalid.mlir @@ -1,6 +1,6 @@ // RUN: torch-mlir-dialects-opt -split-input-file -verify-diagnostics %s -func @scatter_mixed_tensor_memref( +func.func @scatter_mixed_tensor_memref( %update : memref, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}} @@ -16,7 +16,7 @@ func @scatter_mixed_tensor_memref( // ----- -func @scatter_mixed_tensor_memref( +func.func @scatter_mixed_tensor_memref( %update : tensor, %indices : memref, %original : tensor) -> tensor { // expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}} @@ -32,7 +32,7 @@ func @scatter_mixed_tensor_memref( // ----- -func @scatter_extra_outputs( +func.func @scatter_extra_outputs( %update : tensor, %indices : tensor, %original : tensor) -> (tensor, tensor) { // expected-error @+1 {{expected number of outputs to be same as the number of results}} @@ -48,7 +48,7 @@ func @scatter_extra_outputs( // ----- -func @scatter_mixed_tensor_memref( +func.func @scatter_mixed_tensor_memref( %update : tensor, %indices : tensor, %original : memref) -> tensor { // expected-error @+1 
{{expected inputs and outputs to be RankedTensorType or scalar}} @@ -64,7 +64,7 @@ func @scatter_mixed_tensor_memref( // ----- -func @scatter_output_type_mismatch( +func.func @scatter_output_type_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor<4x?xf32> { // expected-error @+1 {{expected type of `outs` operand #0 'tensor' to be same as result type 'tensor<4x?xf32>'}} @@ -80,7 +80,7 @@ func @scatter_output_type_mismatch( // ----- -func @scatter_mixed_tensor_memref( +func.func @scatter_mixed_tensor_memref( %update : memref, %indices : tensor, %original : memref) { // expected-error @+1 {{expected inputs and outputs to be MemRefType or scalar}} @@ -96,7 +96,7 @@ func @scatter_mixed_tensor_memref( // ----- -func @scatter_mixed_tensor_memref( +func.func @scatter_mixed_tensor_memref( %update : memref, %indices : memref, %original : tensor) { // expected-error @+1 {{expected inputs and outputs to be MemRefType or scalar}} @@ -112,7 +112,7 @@ func @scatter_mixed_tensor_memref( // ----- -func @scatter_dim_mismatch( +func.func @scatter_dim_mismatch( %update : tensor, %indices : tensor<48x1xi32>, %original : tensor) -> tensor { // expected-error @+1 {{mismatch in shape of indices and update value at dim#0}} @@ -128,7 +128,7 @@ func @scatter_dim_mismatch( // ----- -func @scatter_dim_mismatch( +func.func @scatter_dim_mismatch( %update : tensor<64x?xf32>, %indices : tensor<48x1xi32>, %original : tensor) -> tensor { // expected-error @+1 {{mismatch in shape of indices and update value at dim#0}} @@ -144,7 +144,7 @@ func @scatter_dim_mismatch( // ----- -func @scatter_dim_mismatch( +func.func @scatter_dim_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{op update value rank exceeds the rank of the original value}} @@ -160,7 +160,7 @@ func @scatter_dim_mismatch( // ----- -func @scatter_dim_mismatch( +func.func @scatter_dim_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{mismatch in shape of update value dim#1 and original value at dim#1}} @@ -176,7 +176,7 @@ func @scatter_dim_mismatch( // ----- -func @scatter_region_type_mismatch( +func.func @scatter_region_type_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{expected region to have scalar argument of integer or float types}} @@ -193,7 +193,7 @@ func @scatter_region_type_mismatch( // ----- -func @scatter_region_type_mismatch( +func.func @scatter_region_type_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{mismatch in argument 0 of region 'i64' and element type of update value 'i32'}} @@ -210,7 +210,7 @@ func @scatter_region_type_mismatch( // ----- -func @scatter_region_type_mismatch( +func.func @scatter_region_type_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{mismatch in argument 1 of region 'i64' and element type of original value 'i32'}} @@ -227,7 +227,7 @@ func @scatter_region_type_mismatch( // ----- -func @scatter_region_type_mismatch( +func.func @scatter_region_type_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{mismatch in region argument types 'i32' and 'i64'}} @@ -244,7 +244,7 @@ func @scatter_region_type_mismatch( // ----- -func @scatter_region_type_mismatch( +func.func @scatter_region_type_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 
{{expected region to have two arguments}} @@ -261,7 +261,7 @@ func @scatter_region_type_mismatch( // ----- -func @scatter_yield_mismatch( +func.func @scatter_yield_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { %0 = tm_tensor.scatter unique_indices(true) @@ -278,7 +278,7 @@ func @scatter_yield_mismatch( // ----- -func @scatter_yield_mismatch( +func.func @scatter_yield_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { %0 = tm_tensor.scatter unique_indices(true) @@ -295,7 +295,7 @@ func @scatter_yield_mismatch( // ----- -func @scatter_index_depth_dynamic( +func.func @scatter_index_depth_dynamic( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{expected index depth is static}} @@ -312,7 +312,7 @@ func @scatter_index_depth_dynamic( // ----- -func @scatter_original_rank_mismatch( +func.func @scatter_original_rank_mismatch( %update : tensor, %indices : tensor, %original : tensor) -> tensor { // expected-error @+1 {{op index depth and update value does not cover rank of original value}} diff --git a/externals/llvm-project b/externals/llvm-project index e1318078a..5380e30e0 160000 --- a/externals/llvm-project +++ b/externals/llvm-project @@ -1 +1 @@ -Subproject commit e1318078a4e160eb723bcbcfcdcc9a1b618f7067 +Subproject commit 5380e30e047bbac9b2cceb69162eb8db1e1a7abf diff --git a/lib/Dialect/Torch/Transforms/ShapeLibrary.cpp b/lib/Dialect/Torch/Transforms/ShapeLibrary.cpp index f1ecc727c..cd3e8064a 100644 --- a/lib/Dialect/Torch/Transforms/ShapeLibrary.cpp +++ b/lib/Dialect/Torch/Transforms/ShapeLibrary.cpp @@ -28,15 +28,15 @@ StringRef mlir::torch::Torch::getShapeLibrary() { #pragma clang diagnostic ignored "-Woverlength-strings" constexpr StringLiteral shapeLib(R"mlir( module { - func @"__torch_mlir_shape_fn.aten.tanh"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.tanh"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0: !torch.list) -> !torch.list { %true = torch.constant.bool true %0 = torch.prim.ListConstruct : () -> !torch.list %1 = torch.aten.len.t %arg0 : !torch.list -> !torch.int @@ -48,250 +48,250 @@ module { } : (!torch.int, !torch.bool) -> () return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.erf"(%arg0: !torch.list) -> !torch.list { - %0 = call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.erf"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sigmoid"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.sigmoid"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.hardsigmoid"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.hardsigmoid"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.square"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.square"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.hardswish"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.hardswish"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.silu"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.silu"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.exp"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.exp"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sin"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func 
@"__torch_mlir_shape_fn.aten.sin"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.cos"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.cos"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.hardtanh"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.hardtanh"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sqrt"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.sqrt"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.neg"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.neg"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.floor"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.floor"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.detach"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.detach"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.log2"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.log2"(%arg0: !torch.list) -> !torch.list { + %0 = 
func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.rsqrt"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.rsqrt"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.abs"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.abs"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.reciprocal"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.reciprocal"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.tanh_backward"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.tanh_backward"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.gelu_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.str) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.gelu_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.str) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.ceil"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.ceil"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.log"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.log"(%arg0: !torch.list) -> !torch.list { + %0 = func.call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.relu"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.relu"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten._softmax"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten._softmax"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.softmax.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.softmax.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten._log_softmax"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten._log_softmax"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.log_softmax.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.log_softmax.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.clamp"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.clamp"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func 
@"__torch_mlir_shape_fn.aten.rsub.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.rsub.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.to.dtype"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool, %arg3: !torch.bool, %arg4: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.to.dtype"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool, %arg3: !torch.bool, %arg4: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.to.dtype_layout"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.bool, %arg6: !torch.bool, %arg7: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.to.dtype_layout"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.bool, %arg6: !torch.bool, %arg7: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.to.other"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.bool, %arg4: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.to.other"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.bool, %arg4: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.type_as"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.type_as"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.dropout"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.dropout"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func 
@"__torch_mlir_shape_fn.aten.gelu"(%arg0: !torch.list, %arg1: !torch.str) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.gelu"(%arg0: !torch.list, %arg1: !torch.str) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.contiguous"(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.contiguous"(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.clone"(%arg0: !torch.list, %arg1: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.clone"(%arg0: !torch.list, %arg1: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten._log_softmax_backward_data"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten._log_softmax_backward_data"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.eq.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.eq.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.ne.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.ne.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.gt.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func 
@"__torch_mlir_shape_fn.aten.gt.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.ge.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.ge.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.le.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.le.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.lt.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.lt.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.add.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.add.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sub.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.sub.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.mul.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.mul.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : 
!torch.list } - func @"__torch_mlir_shape_fn.aten.div.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.div.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.floor_divide.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.floor_divide.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.pow.Tensor_Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.pow.Tensor_Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.leaky_relu"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.leaky_relu"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.gather"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.list, %arg3: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg2) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.gather"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.list, %arg3: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg2) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.layer_norm"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.float, %arg5: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.layer_norm"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.float, %arg5: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func 
@"__torch_mlir_shape_fn.aten._softmax_backward_data"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg1) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten._softmax_backward_data"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg1) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.any"(%arg0: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.any"(%arg0: !torch.list) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.all"(%arg0: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.all"(%arg0: !torch.list) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.max"(%arg0: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.max"(%arg0: !torch.list) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sum"(%arg0: !torch.list, %arg1: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.sum"(%arg0: !torch.list, %arg1: !torch.optional) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.mean"(%arg0: !torch.list, %arg1: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.mean"(%arg0: !torch.list, %arg1: !torch.optional) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.var"(%arg0: !torch.list, %arg1: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.var"(%arg0: !torch.list, %arg1: !torch.bool) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.std"(%arg0: !torch.list, %arg1: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.std"(%arg0: !torch.list, %arg1: !torch.bool) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.argmax"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.argmax"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.bool) -> !torch.list { %none = torch.constant.none %0 = torch.aten.__is__ %arg1, %none : !torch.optional, !torch.none -> !torch.bool %1 = torch.prim.If %0 -> (!torch.list) { @@ -299,17 +299,17 @@ module { torch.prim.If.yield %2 : !torch.list } else { %2 = torch.prim.unchecked_cast %arg1 : !torch.optional -> !torch.int - %3 = call @__torch__._reduce_along_dim(%arg0, %2, %arg2) : (!torch.list, !torch.int, !torch.bool) -> !torch.list + %3 = func.call @__torch__._reduce_along_dim(%arg0, %2, %arg2) : (!torch.list, !torch.int, !torch.bool) -> !torch.list torch.prim.If.yield %3 : !torch.list } return %1 : !torch.list } - func @__torch__._reduce_along_dim(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { + func.func @__torch__._reduce_along_dim(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { %int1 = 
torch.constant.int 1 %int9223372036854775807 = torch.constant.int 9223372036854775807 %true = torch.constant.bool true %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %2 = torch.prim.ListConstruct : () -> !torch.list %3 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %4 = torch.prim.ListConstruct %int9223372036854775807, %3 : (!torch.int, !torch.int) -> !torch.list @@ -334,7 +334,7 @@ module { } : (!torch.int, !torch.bool) -> () return %2 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg0: !torch.int, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg0: !torch.int, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.int { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " @@ -377,21 +377,21 @@ module { } return %8 : !torch.int } - func @"__torch_mlir_shape_fn.aten.any.dim"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { - %0 = call @__torch__._reduce_along_dim(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.bool) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.any.dim"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.list { + %0 = func.call @__torch__._reduce_along_dim(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.bool) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.max.dim"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.tuple, list> { - %0 = call @__torch__._reduce_along_dim(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.bool) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.max.dim"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.tuple, list> { + %0 = func.call @__torch__._reduce_along_dim(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.bool) -> !torch.list %1 = torch.prim.TupleConstruct %0, %0 : !torch.list, !torch.list -> !torch.tuple, list> return %1 : !torch.tuple, list> } - func @"__torch_mlir_shape_fn.aten.mean.dim"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.mean.dim"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.optional) -> !torch.list { %0 = torch.derefine %arg3 : !torch.optional to !torch.any - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mean_dim(%arg0, %arg1, %arg2, %0) : (!torch.list, !torch.list, !torch.bool, !torch.any) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mean_dim(%arg0, %arg1, %arg2, %0) : (!torch.list, !torch.list, !torch.bool, !torch.any) -> !torch.list return %1 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mean_dim(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.any) -> !torch.list { + func.func 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mean_dim(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.any) -> !torch.list { %int1 = torch.constant.int 1 %true = torch.constant.bool true %false = torch.constant.bool false @@ -404,7 +404,7 @@ module { ^bb0(%arg5: !torch.int, %arg6: !torch.bool): %4 = torch.aten.__getitem__.t %arg1, %arg5 : !torch.list, !torch.int -> !torch.int %5 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %6 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%4, %5, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %6 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%4, %5, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %7 = torch.aten.eq.int %arg4, %6 : !torch.int, !torch.int -> !torch.bool %8 = torch.prim.If %7 -> (!torch.bool) { torch.prim.If.yield %true : !torch.bool @@ -430,16 +430,16 @@ module { } : (!torch.int, !torch.bool) -> () return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sum.dim_IntList"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.sum.dim_IntList"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool, %arg3: !torch.optional) -> !torch.list { %0 = torch.derefine %arg3 : !torch.optional to !torch.any - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mean_dim(%arg0, %arg1, %arg2, %0) : (!torch.list, !torch.list, !torch.bool, !torch.any) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mean_dim(%arg0, %arg1, %arg2, %0) : (!torch.list, !torch.list, !torch.bool, !torch.any) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.permute"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.permute(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.permute"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.permute(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.permute(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.permute(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %str = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -459,7 +459,7 @@ module { torch.prim.Loop %3, %true, init() { ^bb0(%arg2: !torch.int): %7 = torch.aten.__getitem__.t %arg1, %arg2 : !torch.list, !torch.int -> !torch.int - %8 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%7, %3, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %8 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%7, %3, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %9 = torch.aten.append.t %4, %8 : !torch.list, !torch.int -> !torch.list %10 = 
torch.aten.__getitem__.t %arg0, %8 : !torch.list, !torch.int -> !torch.int %11 = torch.aten.append.t %5, %10 : !torch.list, !torch.int -> !torch.list @@ -486,18 +486,18 @@ module { } : (!torch.int, !torch.bool) -> () return %5 : !torch.list } - func @"__torch_mlir_shape_fn.aten.transpose.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.transpose(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.transpose.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.transpose(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.int) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.transpose(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.transpose(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { %true = torch.constant.bool true %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg2, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg2, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %3 = torch.aten.eq.int %1, %2 : !torch.int, !torch.int -> !torch.bool %4 = torch.prim.If %3 -> (!torch.list) { - %5 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list + %5 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list torch.prim.If.yield %5 : !torch.list } else { %5 = torch.prim.ListConstruct : () -> !torch.list @@ -527,17 +527,17 @@ module { } return %4 : !torch.list } - func @"__torch_mlir_shape_fn.aten.t"(%arg0: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.t"(%arg0: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.transpose(%arg0, %int0, %int1) : (!torch.list, !torch.int, !torch.int) -> !torch.list + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.transpose(%arg0, %int0, %int1) : (!torch.list, !torch.int, !torch.int) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.matmul"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.matmul(%arg0, %arg1) : (!torch.list, !torch.list) -> 
!torch.list + func.func @"__torch_mlir_shape_fn.aten.matmul"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.matmul(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.matmul(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.matmul(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 %int1 = torch.constant.int 1 @@ -558,7 +558,7 @@ module { torch.prim.If.yield %false : !torch.bool } %5 = torch.prim.If %4 -> (!torch.list) { - %6 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.dot(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + %6 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.dot(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list torch.prim.If.yield %6 : !torch.list } else { %6 = torch.aten.eq.int %1, %int2 : !torch.int, !torch.int -> !torch.bool @@ -569,7 +569,7 @@ module { torch.prim.If.yield %false : !torch.bool } %8 = torch.prim.If %7 -> (!torch.list) { - %9 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mv(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + %9 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mv(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list torch.prim.If.yield %9 : !torch.list } else { %9 = torch.aten.eq.int %1, %int1 : !torch.int, !torch.int -> !torch.bool @@ -580,9 +580,9 @@ module { torch.prim.If.yield %false : !torch.bool } %11 = torch.prim.If %10 -> (!torch.list) { - %12 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unsqueeze(%arg0, %int0) : (!torch.list, !torch.int) -> !torch.list - %13 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%12, %arg1) : (!torch.list, !torch.list) -> !torch.list - %14 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze(%13, %int0) : (!torch.list, !torch.int) -> !torch.list + %12 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unsqueeze(%arg0, %int0) : (!torch.list, !torch.int) -> !torch.list + %13 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%12, %arg1) : (!torch.list, !torch.list) -> !torch.list + %14 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze(%13, %int0) : (!torch.list, !torch.int) -> !torch.list torch.prim.If.yield %14 : !torch.list } else { %12 = torch.aten.eq.int %1, %int2 : !torch.int, !torch.int -> !torch.bool @@ -593,7 +593,7 @@ module { torch.prim.If.yield %false : !torch.bool } %14 = torch.prim.If %13 -> (!torch.list) { - %15 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + %15 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list torch.prim.If.yield %15 : 
!torch.list } else { %15 = torch.aten.ge.int %1, %int1 : !torch.int, !torch.int -> !torch.bool @@ -628,7 +628,7 @@ module { %29 = torch.aten.append.t %23, %28 : !torch.list, !torch.int -> !torch.list torch.prim.Loop.condition %true, iter() } : (!torch.int, !torch.bool) -> () - %25 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%20, %23) : (!torch.list, !torch.list) -> !torch.list + %25 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%20, %23) : (!torch.list, !torch.list) -> !torch.list %26 = torch.aten.gt.int %1, %int1 : !torch.int, !torch.int -> !torch.bool torch.prim.If %26 -> () { %28 = torch.aten.append.t %25, %19 : !torch.list, !torch.int -> !torch.list @@ -658,7 +658,7 @@ module { } return %5 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.dot(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.dot(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %false = torch.constant.bool false @@ -691,7 +691,7 @@ module { %6 = torch.prim.ListConstruct : () -> !torch.list return %6 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mv(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mv(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 @@ -726,12 +726,12 @@ module { %7 = torch.prim.ListConstruct %6 : (!torch.int) -> !torch.list return %7 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { %int1 = torch.constant.int 1 %true = torch.constant.bool true %0 = torch.prim.ListConstruct : () -> !torch.list %1 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %1, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %1, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %3 = torch.aten.len.t %arg0 : !torch.list -> !torch.int torch.prim.Loop %3, %true, init() { ^bb0(%arg2: !torch.int): @@ -756,7 +756,7 @@ module { } : (!torch.int, !torch.bool) -> () return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 @@ -794,17 +794,17 @@ module { %9 = torch.prim.ListConstruct %7, %8 : (!torch.int, !torch.int) -> !torch.list return %9 : !torch.list } - func 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unsqueeze(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unsqueeze(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { %int1 = torch.constant.int 1 %true = torch.constant.bool true %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %1 = torch.aten.add.int %0, %int1 : !torch.int, !torch.int -> !torch.int - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %1, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int - %3 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %1, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %3 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list torch.aten.insert.t %3, %2, %int1 : !torch.list, !torch.int, !torch.int return %3 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %true = torch.constant.bool true @@ -870,22 +870,22 @@ module { } : (!torch.int, !torch.bool) -> () return %3 : !torch.list } - func @"__torch_mlir_shape_fn.aten.mm"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.mm"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.addmm"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.float, %arg4: !torch.float) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.addmm"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.float, %arg4: !torch.float) -> !torch.list { %0 = torch.derefine %arg3 : !torch.float to !torch.any %1 = torch.derefine %arg4 : !torch.float to !torch.any - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.addmm(%arg0, %arg1, %arg2, %0, %1) : (!torch.list, !torch.list, !torch.list, !torch.any, !torch.any) -> !torch.list + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.addmm(%arg0, %arg1, %arg2, %0, %1) : (!torch.list, !torch.list, !torch.list, !torch.any, !torch.any) -> !torch.list return %2 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.addmm(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.any, %arg4: !torch.any) -> !torch.list { - %0 = call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.addmm(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.any, %arg4: !torch.any) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.mm(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.bmm"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.bmm"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 %int0 = torch.constant.int 0 @@ -934,11 +934,11 @@ module { %13 = torch.prim.ListConstruct %10, %11, %12 : (!torch.int, !torch.int, !torch.int) -> !torch.list return %13 : !torch.list } - func @"__torch_mlir_shape_fn.aten.embedding"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.embedding(%arg0, %arg1, %arg2, %arg3, %arg4) : (!torch.list, !torch.list, !torch.int, !torch.bool, !torch.bool) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.embedding"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.embedding(%arg0, %arg1, %arg2, %arg3, %arg4) : (!torch.list, !torch.list, !torch.int, !torch.bool, !torch.bool) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.embedding(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.embedding(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 @@ -955,25 +955,25 @@ module { %2 = torch.aten.len.t %arg1 : !torch.list -> !torch.int %3 = torch.aten.eq.int %2, %int1 : !torch.int, !torch.int -> !torch.bool %4 = torch.prim.If %3 -> (!torch.list) { - %5 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.index_select(%arg0, %int0, %arg1) : (!torch.list, !torch.int, !torch.list) -> !torch.list + %5 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.index_select(%arg0, %int0, %arg1) : (!torch.list, !torch.int, !torch.list) -> !torch.list torch.prim.If.yield %5 : !torch.list } else { - %5 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg1) : (!torch.list) -> !torch.list + %5 = func.call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg1) : (!torch.list) -> !torch.list %6 = torch.aten.__getitem__.t %arg0, %int1 : !torch.list, !torch.int -> !torch.int %7 = torch.aten.append.t %5, %6 : !torch.list, !torch.int -> !torch.list torch.prim.If.yield %5 : !torch.list } return %4 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.index_select(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.index_select(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %true = torch.constant.bool true %str = torch.constant.str "AssertionError: " %none = torch.constant.none %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.multiply_integers(%arg2) : (!torch.list) -> !torch.int + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.multiply_integers(%arg2) : (!torch.list) -> !torch.int %3 = torch.aten.len.t %arg2 : !torch.list -> !torch.int %4 = torch.aten.le.int %3, %int1 : !torch.int, !torch.int -> !torch.bool torch.prim.If %4 -> () { @@ -1013,7 +1013,7 @@ module { } : (!torch.int, !torch.bool) -> () return %7 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.multiply_integers(%arg0: !torch.list) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.multiply_integers(%arg0: !torch.list) -> !torch.int { %int1 = torch.constant.int 1 %true = torch.constant.bool true %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int @@ -1025,11 +1025,11 @@ module { } : (!torch.int, !torch.bool, !torch.int) -> !torch.int return %1 : !torch.int } - func @"__torch_mlir_shape_fn.aten.expand"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.expand(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.expand"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.expand(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.expand(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.expand(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " @@ -1049,7 +1049,7 @@ module { %4 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %5 = 
torch.aten.eq.int %3, %int0 : !torch.int, !torch.int -> !torch.bool %6 = torch.prim.If %5 -> (!torch.list) { - %7 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg1) : (!torch.list) -> !torch.list + %7 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg1) : (!torch.list) -> !torch.list torch.prim.If.yield %7 : !torch.list } else { %7 = torch.prim.ListConstruct : () -> !torch.list @@ -1100,24 +1100,24 @@ module { } return %6 : !torch.list } - func @"__torch_mlir_shape_fn.aten.expand_as"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg1) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.expand_as"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg1) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.broadcast_to"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.expand(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.broadcast_to"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.expand(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.view"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.view"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.numel(%arg0) : (!torch.list) -> !torch.int - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.infer_size_impl(%arg1, %0) : (!torch.list, !torch.int) -> !torch.list + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.numel(%arg0) : (!torch.list) -> !torch.int + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.infer_size_impl(%arg1, %0) : (!torch.list, !torch.int) -> !torch.list return %1 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.infer_size_impl(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.infer_size_impl(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { 
%int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %none = torch.constant.none @@ -1188,7 +1188,7 @@ module { } else { torch.prim.If.yield } - %7 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list + %7 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list %8 = torch.aten.__isnot__ %3#1, %none : !torch.optional, !torch.none -> !torch.bool torch.prim.If %8 -> () { %9 = torch.prim.unchecked_cast %3#1 : !torch.optional -> !torch.int @@ -1200,7 +1200,7 @@ module { } return %7 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.numel(%arg0: !torch.list) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.numel(%arg0: !torch.list) -> !torch.int { %int1 = torch.constant.int 1 %true = torch.constant.bool true %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int @@ -1212,25 +1212,25 @@ module { } : (!torch.int, !torch.bool, !torch.int) -> !torch.int return %1 : !torch.int } - func @"__torch_mlir_shape_fn.aten.reshape"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.reshape"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten._reshape_alias"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten._reshape_alias"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.view(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten._unsafe_view"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten._unsafe_view"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { return %arg1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.resize_"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.resize_"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional) -> !torch.list { return %arg1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.max_pool2d"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_pool2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) : (!torch.list, !torch.list, !torch.list, !torch.list, !torch.list, !torch.bool) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.max_pool2d"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.bool) -> !torch.list { + %0 = func.call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_pool2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) : (!torch.list, !torch.list, !torch.list, !torch.list, !torch.list, !torch.bool) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_pool2d(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.bool) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_pool2d(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.bool) -> !torch.list { %int4 = torch.constant.int 4 %int3 = torch.constant.int 3 %int0 = torch.constant.int 0 @@ -1390,9 +1390,9 @@ module { %37 = torch.aten.__getitem__.t %arg0, %int-3 : !torch.list, !torch.int -> !torch.int %38 = torch.aten.__getitem__.t %arg0, %int-2 : !torch.list, !torch.int -> !torch.int %39 = torch.aten.__getitem__.t %arg0, %int-1 : !torch.list, !torch.int -> !torch.int - %40 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%38, %3, %20, %13, %27, %arg5) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int - %41 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%39, %6, %23, %16, %30, %arg5) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int - %42 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pool2d_shape_check(%arg0, %3, %6, %13, %16, %20, %23, %27, %30, %37, %38, %39, %40, %41) : (!torch.list, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.none + %40 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%38, %3, %20, %13, %27, %arg5) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int + %41 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%39, %6, %23, %16, %30, %arg5) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int + %42 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pool2d_shape_check(%arg0, %3, %6, %13, %16, %20, %23, %27, %30, %37, %38, %39, %40, %41) : (!torch.list, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.none %43 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %44 = torch.aten.eq.int %43, %int3 : !torch.int, !torch.int -> !torch.bool %45 = torch.prim.If %44 -> (!torch.list) { @@ -1404,7 +1404,7 @@ module { } return %45 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%arg0: !torch.int, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int, %arg5: !torch.bool) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%arg0: !torch.int, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int, %arg4: 
!torch.int, %arg5: !torch.bool) -> !torch.int { %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: stride should not be zero" %none = torch.constant.none @@ -1415,10 +1415,10 @@ module { torch.prim.RaiseException %str, %none : !torch.str, !torch.none torch.prim.If.yield } - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape_pad_lr(%arg0, %arg1, %arg2, %arg2, %arg3, %arg4, %arg5) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape_pad_lr(%arg0, %arg1, %arg2, %arg2, %arg3, %arg4, %arg5) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int return %1 : !torch.int } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape_pad_lr(%arg0: !torch.int, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int, %arg5: !torch.int, %arg6: !torch.bool) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape_pad_lr(%arg0: !torch.int, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int, %arg5: !torch.int, %arg6: !torch.bool) -> !torch.int { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %0 = torch.aten.add.int %arg0, %arg2 : !torch.int, !torch.int -> !torch.int @@ -1434,7 +1434,7 @@ module { torch.prim.If.yield %int0 : !torch.int } %7 = torch.aten.add.int %5, %6 : !torch.int, !torch.int -> !torch.int - %8 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.div_rtn(%7, %arg4) : (!torch.int, !torch.int) -> !torch.int + %8 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.div_rtn(%7, %arg4) : (!torch.int, !torch.int) -> !torch.int %9 = torch.aten.add.int %8, %int1 : !torch.int, !torch.int -> !torch.int %10 = torch.prim.If %arg6 -> (!torch.int) { %11 = torch.aten.sub.int %9, %int1 : !torch.int, !torch.int -> !torch.int @@ -1453,11 +1453,11 @@ module { } return %10 : !torch.int } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.div_rtn(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.div_rtn(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { %0 = torch.aten.floordiv.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.int return %0 : !torch.int } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pool2d_shape_check(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int, %arg5: !torch.int, %arg6: !torch.int, %arg7: !torch.int, %arg8: !torch.int, %arg9: !torch.int, %arg10: !torch.int, %arg11: !torch.int, %arg12: !torch.int, %arg13: !torch.int) -> !torch.none { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pool2d_shape_check(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int, %arg5: !torch.int, %arg6: !torch.int, %arg7: !torch.int, %arg8: !torch.int, %arg9: !torch.int, %arg10: !torch.int, %arg11: !torch.int, %arg12: !torch.int, %arg13: !torch.int) -> !torch.none { %int4 = torch.constant.int 4
%int3 = torch.constant.int 3 %int2 = torch.constant.int 2 @@ -1583,19 +1583,19 @@ module { } return %none : !torch.none } - func @"__torch_mlir_shape_fn.aten.max_pool2d_with_indices"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.bool) -> !torch.tuple, list> { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_pool2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) : (!torch.list, !torch.list, !torch.list, !torch.list, !torch.list, !torch.bool) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.max_pool2d_with_indices"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.bool) -> !torch.tuple, list> { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_pool2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) : (!torch.list, !torch.list, !torch.list, !torch.list, !torch.list, !torch.bool) -> !torch.list %1 = torch.prim.TupleConstruct %0, %0 : !torch.list, !torch.list -> !torch.tuple, list> return %1 : !torch.tuple, list> } - func @"__torch_mlir_shape_fn.aten.max_pool2d_with_indices_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.bool, %arg7: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.max_pool2d_with_indices_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.bool, %arg7: !torch.list) -> !torch.list { return %arg1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.avg_pool2d"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.bool, %arg5: !torch.bool, %arg6: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.avg_pool2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.list, !torch.list, !torch.bool, !torch.bool, !torch.optional) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.avg_pool2d"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.bool, %arg5: !torch.bool, %arg6: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.avg_pool2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.list, !torch.list, !torch.bool, !torch.bool, !torch.optional) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.avg_pool2d(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.bool, %arg5: !torch.bool, %arg6: !torch.optional) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.avg_pool2d(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.list, %arg4: !torch.bool, %arg5: !torch.bool, %arg6: !torch.optional) -> !torch.list { %int4 = torch.constant.int 4 %int3 = torch.constant.int 3 %int0 = torch.constant.int 0 @@ -1730,9 +1730,9 @@ module { %30 = torch.aten.__getitem__.t %arg0, %int-3 : !torch.list, !torch.int -> !torch.int %31 = torch.aten.__getitem__.t %arg0, %int-2 : !torch.list, !torch.int -> !torch.int %32 = torch.aten.__getitem__.t 
%arg0, %int-1 : !torch.list, !torch.int -> !torch.int - %33 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%31, %3, %20, %13, %int1, %arg4) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int - %34 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%32, %6, %23, %16, %int1, %arg4) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int - %35 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pool2d_shape_check(%arg0, %3, %6, %13, %16, %20, %23, %int1, %int1, %30, %31, %32, %33, %34) : (!torch.list, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.none + %33 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%31, %3, %20, %13, %int1, %arg4) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int + %34 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pooling_output_shape(%32, %6, %23, %16, %int1, %arg4) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.bool) -> !torch.int + %35 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pool2d_shape_check(%arg0, %3, %6, %13, %16, %20, %23, %int1, %int1, %30, %31, %32, %33, %34) : (!torch.list, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.none %36 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %37 = torch.aten.eq.int %36, %int3 : !torch.int, !torch.int -> !torch.bool %38 = torch.prim.If %37 -> (!torch.list) { @@ -1744,11 +1744,11 @@ module { } return %38 : !torch.list } - func @"__torch_mlir_shape_fn.aten.adaptive_avg_pool2d"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.adaptive_avg_pool2d(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.adaptive_avg_pool2d"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.adaptive_avg_pool2d(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.adaptive_avg_pool2d(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.adaptive_avg_pool2d(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %int4 = torch.constant.int 4 @@ -1815,20 +1815,20 @@ module { } : (!torch.int, !torch.bool) -> () return %7 : !torch.list } - func @"__torch_mlir_shape_fn.aten.flatten.using_ints"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.flatten(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.int) -> !torch.list + func.func 
@"__torch_mlir_shape_fn.aten.flatten.using_ints"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.flatten(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.int) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.flatten(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.flatten(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %true = torch.constant.bool true %str = torch.constant.str "AssertionError: " %none = torch.constant.none %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %2 = torch.aten.len.t %arg0 : !torch.list -> !torch.int - %3 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg2, %2, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %3 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg2, %2, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %4 = torch.aten.le.int %1, %3 : !torch.int, !torch.int -> !torch.bool torch.prim.If %4 -> () { torch.prim.If.yield @@ -1887,19 +1887,19 @@ module { } return %7 : !torch.list } - func @"__torch_mlir_shape_fn.aten.linear"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.linear(%arg0, %arg1, %arg2) : (!torch.list, !torch.list, !torch.optional>) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.linear"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.linear(%arg0, %arg1, %arg2) : (!torch.list, !torch.list, !torch.optional>) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.linear(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.linear(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>) -> !torch.list { %none = torch.constant.none %str = torch.constant.str "AssertionError: " - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.t(%arg1) : (!torch.list) -> !torch.list - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.matmul(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.t(%arg1) : (!torch.list) -> !torch.list + %1 = func.call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.matmul(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list %2 = torch.aten.__isnot__ %arg2, %none : !torch.optional>, !torch.none -> !torch.bool torch.prim.If %2 -> () { %3 = torch.prim.unchecked_cast %arg2 : !torch.optional> -> !torch.list - %4 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%3, %1) : (!torch.list, !torch.list) -> !torch.list + %4 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%3, %1) : (!torch.list, !torch.list) -> !torch.list %5 = torch.aten.eq.int_list %4, %1 : !torch.list, !torch.list -> !torch.bool torch.prim.If %5 -> () { torch.prim.If.yield @@ -1913,7 +1913,7 @@ module { } return %1 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.t(%arg0: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.t(%arg0: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 @@ -1948,88 +1948,88 @@ module { } return %4 : !torch.list } - func @"__torch_mlir_shape_fn.aten.zeros"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.zeros"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.ones"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.ones"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.empty.memory_format"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.empty.memory_format"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.full"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.full"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.full_like"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional, %arg6: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.full_like"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional, %arg6: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.zeros_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: 
!torch.optional, %arg5: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.zeros_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.ones_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.ones_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.empty_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.empty_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.new_zeros"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.new_zeros"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { return %arg1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.new_ones"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.new_ones"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { return %arg1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.new_empty"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.new_empty"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { return %arg1 : !torch.list } - func @"__torch_mlir_shape_fn.aten._to_copy"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.bool, %arg6: 
!torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten._to_copy"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.bool, %arg6: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.masked_fill.Scalar"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.masked_fill.Scalar"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.zero"(%arg0: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.zero"(%arg0: !torch.list) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.fill.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.fill.Scalar"(%arg0: !torch.list, %arg1: !torch.float) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.copy"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.copy"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.uniform"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float, %arg3: !torch.any) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.uniform"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float, %arg3: !torch.any) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.bernoulli.float"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.any) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.bernoulli.float"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.any) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.bernoulli.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.any) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.bernoulli.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.any) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.index_put_impl"(%arg0: !torch.list, %arg1: !torch.list>>, %arg2: !torch.list, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.index_put_impl"(%arg0: !torch.list, %arg1: !torch.list>>, %arg2: !torch.list, %arg3: !torch.bool, %arg4: 
!torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.bernoulli"(%arg0: !torch.list, %arg1: !torch.any) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.bernoulli"(%arg0: !torch.list, %arg1: !torch.any) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.rand_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.rand_like"(%arg0: !torch.list, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.arange.start_step"(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.float, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional, %arg6: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.arange.start_step"(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.float, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional, %arg6: !torch.optional) -> !torch.list { %0 = torch.derefine %arg3 : !torch.optional to !torch.any %1 = torch.derefine %arg4 : !torch.optional to !torch.any %2 = torch.derefine %arg5 : !torch.optional to !torch.any %3 = torch.derefine %arg6 : !torch.optional to !torch.any - %4 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start_step(%arg0, %arg1, %arg2, %0, %1, %2, %3) : (!torch.float, !torch.float, !torch.float, !torch.any, !torch.any, !torch.any, !torch.any) -> !torch.list + %4 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start_step(%arg0, %arg1, %arg2, %0, %1, %2, %3) : (!torch.float, !torch.float, !torch.float, !torch.any, !torch.any, !torch.any, !torch.any) -> !torch.list return %4 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start_step(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.float, %arg3: !torch.any, %arg4: !torch.any, %arg5: !torch.any, %arg6: !torch.any) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start_step(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.float, %arg3: !torch.any, %arg4: !torch.any, %arg5: !torch.any, %arg6: !torch.any) -> !torch.list { %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -2066,15 +2066,15 @@ module { %5 = torch.prim.ListConstruct %4 : (!torch.int) -> !torch.list return %5 : !torch.list } - func @"__torch_mlir_shape_fn.aten.arange.start"(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.arange.start"(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional, %arg5: !torch.optional) -> !torch.list { %0 = torch.derefine %arg2 : !torch.optional to !torch.any %1 = torch.derefine %arg3 : !torch.optional to !torch.any %2 = torch.derefine %arg4 : !torch.optional to !torch.any %3 = torch.derefine %arg5 : !torch.optional to 
!torch.any - %4 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start(%arg0, %arg1, %0, %1, %2, %3) : (!torch.float, !torch.float, !torch.any, !torch.any, !torch.any, !torch.any) -> !torch.list + %4 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start(%arg0, %arg1, %0, %1, %2, %3) : (!torch.float, !torch.float, !torch.any, !torch.any, !torch.any, !torch.any) -> !torch.list return %4 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.any, %arg3: !torch.any, %arg4: !torch.any, %arg5: !torch.any) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_start(%arg0: !torch.float, %arg1: !torch.float, %arg2: !torch.any, %arg3: !torch.any, %arg4: !torch.any, %arg5: !torch.any) -> !torch.list { %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -2097,15 +2097,15 @@ module { %4 = torch.prim.ListConstruct %3 : (!torch.int) -> !torch.list return %4 : !torch.list } - func @"__torch_mlir_shape_fn.aten.arange"(%arg0: !torch.float, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.arange"(%arg0: !torch.float, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.optional) -> !torch.list { %0 = torch.derefine %arg1 : !torch.optional to !torch.any %1 = torch.derefine %arg2 : !torch.optional to !torch.any %2 = torch.derefine %arg3 : !torch.optional to !torch.any %3 = torch.derefine %arg4 : !torch.optional to !torch.any - %4 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_end(%arg0, %0, %1, %2, %3) : (!torch.float, !torch.any, !torch.any, !torch.any, !torch.any) -> !torch.list + %4 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_end(%arg0, %0, %1, %2, %3) : (!torch.float, !torch.any, !torch.any, !torch.any, !torch.any) -> !torch.list return %4 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_end(%arg0: !torch.float, %arg1: !torch.any, %arg2: !torch.any, %arg3: !torch.any, %arg4: !torch.any) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.arange_end(%arg0: !torch.float, %arg1: !torch.any, %arg2: !torch.any, %arg3: !torch.any, %arg4: !torch.any) -> !torch.list { %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -2120,67 +2120,67 @@ module { %2 = torch.prim.ListConstruct %1 : (!torch.int) -> !torch.list return %2 : !torch.list } - func @"__torch_mlir_shape_fn.aten.add.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.add.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list 
return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.sub.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.sub.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.mul.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.mul.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.div.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.div.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.__and__.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.__and__.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.minimum"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.minimum"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.maximum"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.maximum"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list 
} - func @"__torch_mlir_shape_fn.aten.bitwise_and.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.bitwise_and.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.threshold"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.threshold"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.threshold_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.threshold_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.eq.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.eq.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.gt.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.gt.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.lt.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.lt.Tensor"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list 
} - func @"__torch_mlir_shape_fn.aten.unsqueeze"(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unsqueeze(%arg0, %arg1) : (!torch.list, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.unsqueeze"(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unsqueeze(%arg0, %arg1) : (!torch.list, !torch.int) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.squeeze"(%arg0: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze_nodim(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.squeeze"(%arg0: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze_nodim(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze_nodim(%arg0: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze_nodim(%arg0: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %true = torch.constant.bool true %0 = torch.prim.ListConstruct : () -> !torch.list @@ -2200,64 +2200,64 @@ module { } : (!torch.int, !torch.bool) -> () return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.squeeze.dim"(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze(%arg0, %arg1) : (!torch.list, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.squeeze.dim"(%arg0: !torch.list, %arg1: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.squeeze(%arg0, %arg1) : (!torch.list, !torch.int) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.prim.NumToTensor.Scalar"(%arg0: !torch.float) -> !torch.list { + func.func @"__torch_mlir_shape_fn.prim.NumToTensor.Scalar"(%arg0: !torch.float) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.tensor.float"(%arg0: !torch.float, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.tensor.float"(%arg0: !torch.float, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.bool) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.tensor.int"(%arg0: !torch.int, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.tensor.int"(%arg0: !torch.int, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.bool) -> !torch.list { %0 = torch.prim.ListConstruct : () -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.tensor.bool"(%arg0: !torch.bool, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.tensor.bool"(%arg0: !torch.bool, %arg1: !torch.optional, %arg2: !torch.optional, %arg3: !torch.bool) -> !torch.list { %0 = torch.prim.ListConstruct : 
() -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten._shape_as_tensor"(%arg0: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten._shape_as_tensor"(%arg0: !torch.list) -> !torch.list { %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %1 = torch.prim.ListConstruct %0 : (!torch.int) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.where.self"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.where.self"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.where.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.where.Scalar"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.where.ScalarOther"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.where.ScalarOther"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.where.ScalarSelf"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg2) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.where.ScalarSelf"(%arg0: !torch.list, %arg1: !torch.float, %arg2: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %arg2) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.lerp.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list - %1 = call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.lerp.Tensor"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.addcmul"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.addcmul"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.addcdiv"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.addcdiv"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg1, %arg2) : (!torch.list, !torch.list) -> !torch.list + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg0, %0) : (!torch.list, !torch.list) -> !torch.list return %1 : !torch.list } - func @"__torch_mlir_shape_fn.aten.topk"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.tuple, list> { + func.func @"__torch_mlir_shape_fn.aten.topk"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.bool, %arg4: !torch.bool) -> !torch.tuple, list> { %str = torch.constant.str "k ({}) is too big for dimension {} of size {}" %str_0 = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -2276,11 +2276,11 @@ module { %3 = torch.prim.TupleConstruct %arg0, %arg0 : !torch.list, !torch.list -> !torch.tuple, list> return %3 : !torch.tuple, list> } - func @"__torch_mlir_shape_fn.aten.conv2d"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.list { - %0 = call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.conv2d"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv2d(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv2d(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.list { %int4 = torch.constant.int 4 %str = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -2300,15 +2300,15 @@ module { torch.prim.RaiseException %str, %none : !torch.str, !torch.none torch.prim.If.yield } - %4 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv_output_size(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list + %4 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv_output_size(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list return %4 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv_output_size(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv_output_size(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.list { %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 %int0 = torch.constant.int 0 %true = torch.constant.bool true - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_shape_forward(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.none + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_shape_forward(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.none %1 = torch.aten.len.t %arg5 : !torch.list -> !torch.int %2 = torch.aten.gt.int %1, %int0 : !torch.int, !torch.int -> !torch.bool %3 = torch.aten.len.t %arg0 : !torch.list -> !torch.int @@ -2347,7 +2347,7 @@ 
module { } : (!torch.int, !torch.bool) -> () return %4 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_shape_forward(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.none { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_shape_forward(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.int) -> !torch.none { %int2 = torch.constant.int 2 %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 @@ -2357,7 +2357,7 @@ module { %false = torch.constant.bool false %0 = torch.aten.len.t %arg0 : !torch.list -> !torch.int %1 = torch.aten.len.t %arg1 : !torch.list -> !torch.int - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_non_negative(%arg4) : (!torch.list) -> !torch.bool + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_non_negative(%arg4) : (!torch.list) -> !torch.bool %3 = torch.aten.__not__ %2 : !torch.bool -> !torch.bool torch.prim.If %3 -> () { torch.prim.If.yield @@ -2365,7 +2365,7 @@ module { torch.prim.RaiseException %str, %none : !torch.str, !torch.none torch.prim.If.yield } - %4 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_non_negative(%arg3) : (!torch.list) -> !torch.bool + %4 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_non_negative(%arg3) : (!torch.list) -> !torch.bool %5 = torch.aten.__not__ %4 : !torch.bool -> !torch.bool torch.prim.If %5 -> () { torch.prim.If.yield @@ -2456,7 +2456,7 @@ module { } : (!torch.int, !torch.bool) -> () return %none : !torch.none } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_non_negative(%arg0: !torch.list) -> !torch.bool { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_non_negative(%arg0: !torch.list) -> !torch.bool { %int0 = torch.constant.int 0 %false = torch.constant.bool false %true = torch.constant.bool true @@ -2474,21 +2474,21 @@ module { } : (!torch.int, !torch.bool, !torch.bool) -> !torch.bool return %1 : !torch.bool } - func @"__torch_mlir_shape_fn.aten.convolution"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.bool, %arg7: !torch.list, %arg8: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv_output_size(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg8) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.convolution"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.list, %arg4: !torch.list, %arg5: !torch.list, %arg6: !torch.bool, %arg7: !torch.list, %arg8: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.conv_output_size(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg8) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list return %0 : 
!torch.list } - func @"__torch_mlir_shape_fn.aten.flip"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.flip"(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.batch_norm"(%arg0: !torch.list, %arg1: !torch.optional>, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.optional>, %arg5: !torch.bool, %arg6: !torch.float, %arg7: !torch.float, %arg8: !torch.bool) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.batch_norm"(%arg0: !torch.list, %arg1: !torch.optional>, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.optional>, %arg5: !torch.bool, %arg6: !torch.float, %arg7: !torch.float, %arg8: !torch.bool) -> !torch.list { return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.slice.Tensor"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.slice(%arg0, %arg1, %arg2, %arg3, %arg4) : (!torch.list, !torch.int, !torch.optional, !torch.optional, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.slice.Tensor"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.slice(%arg0, %arg1, %arg2, %arg3, %arg4) : (!torch.list, !torch.int, !torch.optional, !torch.optional, !torch.int) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.slice(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.slice(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.optional, %arg3: !torch.optional, %arg4: !torch.int) -> !torch.list { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " @@ -2502,7 +2502,7 @@ module { torch.prim.RaiseException %str, %none : !torch.str, !torch.none torch.prim.If.yield } - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %3 = torch.aten.__isnot__ %arg2, %none : !torch.optional, !torch.none -> !torch.bool %4 = torch.prim.If %3 -> (!torch.int) { %25 = torch.prim.unchecked_cast %arg2 : !torch.optional -> !torch.int @@ -2515,7 +2515,7 @@ module { %25 = torch.prim.unchecked_cast %arg3 : !torch.optional -> !torch.int torch.prim.If.yield %25 : !torch.int } else { - %25 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_int() : () -> !torch.int + %25 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_int() : () -> !torch.int torch.prim.If.yield %25 : !torch.int } %7 = torch.aten.gt.int %arg4, %int0 : !torch.int, !torch.int -> !torch.bool @@ -2525,7 +2525,7 @@ module { torch.prim.RaiseException %str, %none : !torch.str, !torch.none torch.prim.If.yield } - %8 = 
call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_int() : () -> !torch.int + %8 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_int() : () -> !torch.int %9 = torch.aten.eq.int %4, %8 : !torch.int, !torch.int -> !torch.bool %10 = torch.prim.If %9 -> (!torch.int) { torch.prim.If.yield %int0 : !torch.int @@ -2577,22 +2577,22 @@ module { torch.prim.If.yield %27 : !torch.int } %19 = torch.aten.sub.int %18, %16 : !torch.int, !torch.int -> !torch.int - %20 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list + %20 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%arg0) : (!torch.list) -> !torch.list %21 = torch.aten.add.int %19, %arg4 : !torch.int, !torch.int -> !torch.int %22 = torch.aten.sub.int %21, %int1 : !torch.int, !torch.int -> !torch.int %23 = torch.aten.floordiv.int %22, %arg4 : !torch.int, !torch.int -> !torch.int %24 = torch.aten._set_item.t %20, %2, %23 : !torch.list, !torch.int, !torch.int -> !torch.list return %20 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_int() -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.max_int() -> !torch.int { %int9223372036854775807 = torch.constant.int 9223372036854775807 return %int9223372036854775807 : !torch.int } - func @"__torch_mlir_shape_fn.aten.select.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.select(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.select.int"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.select(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.int) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.select(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.select(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.int) -> !torch.list { %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " %none = torch.constant.none @@ -2605,7 +2605,7 @@ module { torch.prim.RaiseException %str, %none : !torch.str, !torch.none torch.prim.If.yield } - %2 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %2 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg1, %0, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %3 = torch.aten.__getitem__.t %arg0, %2 : !torch.list, !torch.int -> !torch.int %4 = torch.aten.neg.int %3 : !torch.int -> !torch.int %5 = torch.aten.lt.int %arg2, %4 : !torch.int, !torch.int -> !torch.bool @@ -2637,19 +2637,19 @@ module { } : (!torch.int, !torch.bool) -> () return %8 : !torch.list } - func @"__torch_mlir_shape_fn.aten.index_select"(%arg0: !torch.list, %arg1: !torch.int, 
%arg2: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.index_select(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.index_select"(%arg0: !torch.list, %arg1: !torch.int, %arg2: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.index_select(%arg0, %arg1, %arg2) : (!torch.list, !torch.int, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.index_put"(%arg0: !torch.list, %arg1: !torch.list>>, %arg2: !torch.list, %arg3: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.index_put"(%arg0: !torch.list, %arg1: !torch.list>>, %arg2: !torch.list, %arg3: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.index_put.hacked_twin"(%arg0: !torch.list, %arg1: !torch.list>, %arg2: !torch.list, %arg3: !torch.bool) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.index_put.hacked_twin"(%arg0: !torch.list, %arg1: !torch.list>, %arg2: !torch.list, %arg3: !torch.bool) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg0) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.nll_loss_forward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.int, %arg4: !torch.int) -> !torch.tuple, list> { + func.func @"__torch_mlir_shape_fn.aten.nll_loss_forward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.int, %arg4: !torch.int) -> !torch.tuple, list> { %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 %int0 = torch.constant.int 0 @@ -2743,11 +2743,11 @@ module { } return %14 : !torch.tuple, list> } - func @"__torch_mlir_shape_fn.aten.nll_loss_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.optional>, %arg4: !torch.int, %arg5: !torch.int, %arg6: !torch.list) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg1) : (!torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.nll_loss_backward"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.list, %arg3: !torch.optional>, %arg4: !torch.int, %arg5: !torch.int, %arg6: !torch.list) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.unary(%arg1) : (!torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.native_layer_norm"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.float) -> !torch.tuple, list, list> { + func.func @"__torch_mlir_shape_fn.aten.native_layer_norm"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.float) -> !torch.tuple, list, list> { %int1 = torch.constant.int 1 %int0 = 
torch.constant.int 0 %str = torch.constant.str "AssertionError: " @@ -2780,7 +2780,7 @@ module { %7 = torch.prim.TupleConstruct %arg0, %0, %0 : !torch.list, !torch.list, !torch.list -> !torch.tuple, list, list> return %7 : !torch.tuple, list, list> } - func @"__torch_mlir_shape_fn.aten.native_batch_norm"(%arg0: !torch.list, %arg1: !torch.optional>, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.optional>, %arg5: !torch.bool, %arg6: !torch.float, %arg7: !torch.float) -> !torch.tuple, list, list> { + func.func @"__torch_mlir_shape_fn.aten.native_batch_norm"(%arg0: !torch.list, %arg1: !torch.optional>, %arg2: !torch.optional>, %arg3: !torch.optional>, %arg4: !torch.optional>, %arg5: !torch.bool, %arg6: !torch.float, %arg7: !torch.float) -> !torch.tuple, list, list> { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %0 = torch.prim.If %arg5 -> (!torch.tuple, list, list>) { @@ -2798,11 +2798,11 @@ module { } return %0 : !torch.tuple, list, list> } - func @"__torch_mlir_shape_fn.aten.constant_pad_nd"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pad(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.constant_pad_nd"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.float) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pad(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pad(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pad(%arg0: !torch.list, %arg1: !torch.list) -> !torch.list { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 @@ -2848,11 +2848,11 @@ module { } : (!torch.int, !torch.bool) -> () return %arg0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.pad"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.str, %arg3: !torch.optional) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pad(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.pad"(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.str, %arg3: !torch.optional) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.pad(%arg0, %arg1) : (!torch.list, !torch.list) -> !torch.list return %0 : !torch.list } - func @"__torch_mlir_shape_fn.aten.index.Tensor"(%arg0: !torch.list, %arg1: !torch.list>>) -> !torch.list { + func.func @"__torch_mlir_shape_fn.aten.index.Tensor"(%arg0: !torch.list, %arg1: !torch.list>>) -> !torch.list { %str = torch.constant.str "AssertionError: More indices than dimensions to index" %none = torch.constant.none %true = torch.constant.bool true @@ -2873,7 +2873,7 @@ module { %7 = torch.aten.__isnot__ %6, %none : !torch.optional>, !torch.none -> !torch.bool %8 = torch.prim.If %7 -> (!torch.list) { %9 = torch.prim.unchecked_cast %6 : !torch.optional> -> !torch.list - %10 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg3, %9) : (!torch.list, !torch.list) -> !torch.list + %10 = func.call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.broadcast(%arg3, %9) : (!torch.list, !torch.list) -> !torch.list torch.prim.If.yield %10 : !torch.list } else { torch.prim.If.yield %arg3 : !torch.list @@ -2882,17 +2882,17 @@ module { } : (!torch.int, !torch.bool, !torch.list) -> !torch.list return %5 : !torch.list } - func @"__torch_mlir_shape_fn.aten.cat"(%arg0: !torch.list>, %arg1: !torch.int) -> !torch.list { - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.cat(%arg0, %arg1) : (!torch.list>, !torch.int) -> !torch.list + func.func @"__torch_mlir_shape_fn.aten.cat"(%arg0: !torch.list>, %arg1: !torch.int) -> !torch.list { + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.cat(%arg0, %arg1) : (!torch.list>, !torch.int) -> !torch.list return %0 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.cat(%arg0: !torch.list>, %arg1: !torch.int) -> !torch.list { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.cat(%arg0: !torch.list>, %arg1: !torch.int) -> !torch.list { %int0 = torch.constant.int 0 %str = torch.constant.str "AssertionError: " %none = torch.constant.none %true = torch.constant.bool true - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_no_zero_dim(%arg0) : (!torch.list>) -> !torch.none - %1 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.legacy_cat_wrap_dim(%arg1, %arg0) : (!torch.int, !torch.list>) -> !torch.int + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_no_zero_dim(%arg0) : (!torch.list>) -> !torch.none + %1 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.legacy_cat_wrap_dim(%arg1, %arg0) : (!torch.int, !torch.list>) -> !torch.int %2 = torch.aten.len.t %arg0 : !torch.list> -> !torch.int %3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool torch.prim.If %3 -> () { @@ -2906,7 +2906,7 @@ module { %6 = torch.prim.Loop %4, %true, init(%5) { ^bb0(%arg2: !torch.int, %arg3: !torch.optional>): %9 = torch.aten.__getitem__.t %arg0, %arg2 : !torch.list>, !torch.int -> !torch.list - %10 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.should_skip(%9) : (!torch.list) -> !torch.bool + %10 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.should_skip(%9) : (!torch.list) -> !torch.bool %11 = torch.aten.__not__ %10 : !torch.bool -> !torch.bool %12 = torch.prim.If %11 -> (!torch.optional>) { %13 = torch.derefine %9 : !torch.list to !torch.optional> @@ -2926,10 +2926,10 @@ module { %11 = torch.prim.Loop %10, %true, init(%int0) { ^bb0(%arg2: !torch.int, %arg3: !torch.int): %14 = torch.aten.__getitem__.t %arg0, %arg2 : !torch.list>, !torch.int -> !torch.list - %15 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.should_skip(%14) : (!torch.list) -> !torch.bool + %15 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.should_skip(%14) : (!torch.list) -> !torch.bool %16 = torch.aten.__not__ %15 : !torch.bool -> !torch.bool %17 = torch.prim.If %16 -> (!torch.int) { - %18 = call 
@__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_shape_except_dim(%9, %14, %1, %arg2) : (!torch.list, !torch.list, !torch.int, !torch.int) -> !torch.none + %18 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_shape_except_dim(%9, %14, %1, %arg2) : (!torch.list, !torch.list, !torch.int, !torch.int) -> !torch.none %19 = torch.aten.__getitem__.t %14, %1 : !torch.list, !torch.int -> !torch.int %20 = torch.aten.add.int %arg3, %19 : !torch.int, !torch.int -> !torch.int torch.prim.If.yield %20 : !torch.int @@ -2938,13 +2938,13 @@ module { } torch.prim.Loop.condition %true, iter(%17 : !torch.int) } : (!torch.int, !torch.bool, !torch.int) -> !torch.int - %12 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%9) : (!torch.list) -> !torch.list + %12 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers._copy(%9) : (!torch.list) -> !torch.list %13 = torch.aten._set_item.t %12, %1, %11 : !torch.list, !torch.int, !torch.int -> !torch.list torch.prim.If.yield %12 : !torch.list } return %8 : !torch.list } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_no_zero_dim(%arg0: !torch.list>) -> !torch.none { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_no_zero_dim(%arg0: !torch.list>) -> !torch.none { %int0 = torch.constant.int 0 %true = torch.constant.bool true %str = torch.constant.str "AssertionError: " @@ -2965,7 +2965,7 @@ module { } : (!torch.int, !torch.bool) -> () return %none : !torch.none } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.legacy_cat_wrap_dim(%arg0: !torch.int, %arg1: !torch.list>) -> !torch.int { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.legacy_cat_wrap_dim(%arg0: !torch.int, %arg1: !torch.list>) -> !torch.int { %int0 = torch.constant.int 0 %none = torch.constant.none %true = torch.constant.bool true @@ -2992,7 +2992,7 @@ module { } %10 = torch.prim.If %9 -> (!torch.optional) { %11 = torch.aten.len.t %5 : !torch.list -> !torch.int - %12 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg0, %11, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int + %12 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.maybe_wrap_dim(%arg0, %11, %true) : (!torch.int, !torch.int, !torch.bool) -> !torch.int %13 = torch.derefine %12 : !torch.int to !torch.optional torch.prim.If.yield %13 : !torch.optional } else { @@ -3009,11 +3009,11 @@ module { } return %4 : !torch.int } - func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.should_skip(%arg0: !torch.list) -> !torch.bool { + func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.should_skip(%arg0: !torch.list) -> !torch.bool { %int1 = torch.constant.int 1 %int0 = torch.constant.int 0 %false = torch.constant.bool false - %0 = call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.numel(%arg0) : (!torch.list) -> !torch.int + %0 = func.call @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.numel(%arg0) : (!torch.list) -> !torch.int %1 = torch.aten.eq.int %0, 
%int0 : !torch.int, !torch.int -> !torch.bool
     %2 = torch.prim.If %1 -> (!torch.bool) {
       %3 = torch.aten.len.t %arg0 : !torch.list -> !torch.int
@@ -3024,7 +3024,7 @@ module {
     }
     return %2 : !torch.bool
   }
-  func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_shape_except_dim(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.int) -> !torch.none {
+  func.func @__torch__.torch_mlir.dialects.torch.importer.jit_ir.build_tools.upstream_shape_helpers.check_cat_shape_except_dim(%arg0: !torch.list, %arg1: !torch.list, %arg2: !torch.int, %arg3: !torch.int) -> !torch.none {
     %int0 = torch.constant.int 0
     %str = torch.constant.str "AssertionError: Tensors must have same number of dimensions"
     %none = torch.constant.none
@@ -3063,18 +3063,18 @@ module {
     } : (!torch.int, !torch.bool) -> ()
     return %none : !torch.none
   }
-  func @"__torch_mlir_shape_fn.aten.bincount"(%arg0: !torch.list, %arg1: !torch.optional>, %arg2: !torch.int) -> !torch.list {
-    %0 = call @__torch__.hacky_get_unknown_dimension_size() : () -> !torch.int
+  func.func @"__torch_mlir_shape_fn.aten.bincount"(%arg0: !torch.list, %arg1: !torch.optional>, %arg2: !torch.int) -> !torch.list {
+    %0 = func.call @__torch__.hacky_get_unknown_dimension_size() : () -> !torch.int
     %1 = torch.prim.ListConstruct %0 : (!torch.int) -> !torch.list
     return %1 : !torch.list
   }
-  func @__torch__.hacky_get_unknown_dimension_size() -> !torch.int {
+  func.func @__torch__.hacky_get_unknown_dimension_size() -> !torch.int {
     %0 = torch.prim.CreateObject !torch.nn.Module<"__torch__.DummyClassType">
     %1 = torch.prim.CallMethod %0["__init__"] () : !torch.nn.Module<"__torch__.DummyClassType">, () -> !torch.none
     %2 = torch.operator "prim.id"(%0) : (!torch.nn.Module<"__torch__.DummyClassType">) -> !torch.int
     return %2 : !torch.int
   }
-  func @__torch__.DummyClassType.__init__(%arg0: !torch.nn.Module<"__torch__.DummyClassType">) -> !torch.none {
+  func.func @__torch__.DummyClassType.__init__(%arg0: !torch.nn.Module<"__torch__.DummyClassType">) -> !torch.none {
     %none = torch.constant.none
     return %none : !torch.none
   }
diff --git a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/module_builder.cpp b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/module_builder.cpp
index cf75f7c0f..70ac687b9 100644
--- a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/module_builder.cpp
+++ b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/module_builder.cpp
@@ -116,7 +116,6 @@ ModuleBuilder::ModuleBuilder(pybind11::object contextObj)
       unknownLoc(mlirLocationUnknownGet(context)) {
   // TODO: Rework this once dialect registration C-APIs are in place.
   // https://reviews.llvm.org/D88162
-  mlirRegisterAllDialects(context);
   torchMlirRegisterAllDialects(context);
   registerPythonSysStderrDiagnosticHandler(context);
diff --git a/test/Conversion/TorchToLinalg/basic.mlir b/test/Conversion/TorchToLinalg/basic.mlir
index 95f1a8bab..1e0f66582 100644
--- a/test/Conversion/TorchToLinalg/basic.mlir
+++ b/test/Conversion/TorchToLinalg/basic.mlir
@@ -1,6 +1,6 @@
 // RUN: torch-mlir-opt <%s -convert-torch-to-linalg -split-input-file -verify-diagnostics | FileCheck %s
-// CHECK-LABEL: func @torch.aten.mm$basic(
+// CHECK-LABEL: func.func @torch.aten.mm$basic(
 // CHECK-SAME: %[[LHS_VTENSOR:.*]]: !torch.vtensor<[?,?],f32>,
 // CHECK-SAME: %[[RHS_VTENSOR:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,2],f32> {
 // CHECK: %[[LHS:.*]] = torch_c.to_builtin_tensor %[[LHS_VTENSOR]] : !torch.vtensor<[?,?],f32> -> tensor
@@ -22,7 +22,7 @@
 // CHECK: %[[CASTED:.*]] = tensor.cast %[[MATMUL]] : tensor to tensor
 // CHECK: %[[RESULT_VTENSOR:.*]] = torch_c.from_builtin_tensor %[[CASTED]] : tensor -> !torch.vtensor<[?,2],f32>
 // CHECK: return %[[RESULT_VTENSOR]] : !torch.vtensor<[?,2],f32>
-func @torch.aten.mm$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,2],f32> {
+func.func @torch.aten.mm$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,2],f32> {
   %0 = torch.aten.mm %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,2],f32>
   return %0 : !torch.vtensor<[?,2],f32>
 }
@@ -30,7 +30,7 @@ func @torch.aten.mm$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtenso
 // -----
 // If the operands are missing dtype, we cannot lower it.
-func @torch.aten.mm$no_convert$missing_dtype(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor {
+func.func @torch.aten.mm$no_convert$missing_dtype(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor {
   // expected-error@+1 {{failed to legalize}}
   %0 = torch.aten.mm %arg0, %arg1 : !torch.vtensor, !torch.vtensor -> !torch.vtensor
   return %0 : !torch.vtensor
@@ -40,7 +40,7 @@ func @torch.aten.mm$no_convert$missing_dtype(%arg0: !torch.vtensor, %arg1: !torc
 // Correctly handle the case that operands are statically the wrong rank
 // (rank 1 vs rank 2 expected for matmul.)
-func @torch.aten.mm$no_convert$wrong_rank(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,?],f32> {
+func.func @torch.aten.mm$no_convert$wrong_rank(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,?],f32> {
   // expected-error@+1 {{failed to legalize}}
   %0 = torch.aten.mm %arg0, %arg1 : !torch.vtensor<[?],f32>, !torch.vtensor<[?],f32> -> !torch.vtensor<[?,?],f32>
   return %0 : !torch.vtensor<[?,?],f32>
@@ -49,7 +49,7 @@ func @torch.aten.mm$no_convert$wrong_rank(%arg0: !torch.vtensor<[?],f32>, %arg1:
 // -----
 // If the result is missing dtype, we cannot lower it.
-func @torch.aten.mm$no_convert$result_missing_dtype(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor { +func.func @torch.aten.mm$no_convert$result_missing_dtype(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor { // expected-error@+1 {{failed to legalize}} %0 = torch.aten.mm %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor return %0 : !torch.vtensor @@ -57,20 +57,20 @@ func @torch.aten.mm$no_convert$result_missing_dtype(%arg0: !torch.vtensor<[?,?], // ----- -// CHECK-LABEL: func @torch.aten.Int.Tensor$zero_rank +// CHECK-LABEL: func.func @torch.aten.Int.Tensor$zero_rank // CHECK-SAME: (%[[ARG:.*]]: !torch.vtensor<[],si64>) -> !torch.int { // CHECK: %[[I:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[],si64> -> tensor // CHECK: %[[EXT:.*]] = tensor.extract %[[I]][] : tensor // CHECK: %[[RET:.*]] = torch_c.from_i64 %[[EXT]] // CHECK: return %[[RET]] : !torch.int -func @torch.aten.Int.Tensor$zero_rank(%arg0: !torch.vtensor<[],si64>) -> !torch.int { +func.func @torch.aten.Int.Tensor$zero_rank(%arg0: !torch.vtensor<[],si64>) -> !torch.int { %0 = torch.aten.Int.Tensor %arg0 : !torch.vtensor<[],si64> -> !torch.int return %0 : !torch.int } // ----- -// CHECK-LABEL: func @torch.aten.Int.Tensor$non_zero_rank +// CHECK-LABEL: func.func @torch.aten.Int.Tensor$non_zero_rank // CHECK-SAME: (%[[ARG:.*]]: !torch.vtensor<[?,?],si64>) -> !torch.int { // CHECK: %[[I:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],si64> -> tensor // CHECK: %[[C0:.*]] = arith.constant 0 : index @@ -88,27 +88,27 @@ func @torch.aten.Int.Tensor$zero_rank(%arg0: !torch.vtensor<[],si64>) -> !torch. // CHECK: %[[EXT:.*]] = tensor.extract %[[I]][%[[ZERO]], %[[ZERO]]] : tensor // CHECK: %[[RET:.*]] = torch_c.from_i64 %[[EXT]] // CHECK: return %[[RET]] : !torch.int -func @torch.aten.Int.Tensor$non_zero_rank(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.int { +func.func @torch.aten.Int.Tensor$non_zero_rank(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.int { %0 = torch.aten.Int.Tensor %arg0 : !torch.vtensor<[?,?],si64> -> !torch.int return %0 : !torch.int } // ----- -// CHECK-LABEL: func @torch.aten.Float.Tensor$zero_rank +// CHECK-LABEL: func.func @torch.aten.Float.Tensor$zero_rank // CHECK-SAME: (%[[ARG:.*]]: !torch.vtensor<[],f64>) -> !torch.float { // CHECK: %[[F:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[],f64> -> tensor // CHECK: %[[EXT:.*]] = tensor.extract %[[F]][] : tensor // CHECK: %[[RET:.*]] = torch_c.from_f64 %[[EXT]] // CHECK: return %[[RET]] : !torch.float -func @torch.aten.Float.Tensor$zero_rank(%arg0: !torch.vtensor<[],f64>) -> !torch.float { +func.func @torch.aten.Float.Tensor$zero_rank(%arg0: !torch.vtensor<[],f64>) -> !torch.float { %0 = torch.aten.Float.Tensor %arg0 : !torch.vtensor<[],f64> -> !torch.float return %0 : !torch.float } // ----- -// CHECK-LABEL: func @torch.aten.Float.Tensor$non_zero_rank +// CHECK-LABEL: func.func @torch.aten.Float.Tensor$non_zero_rank // CHECK-SAME: (%[[ARG:.*]]: !torch.vtensor<[?,?],f64>) -> !torch.float { // CHECK: %[[F:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f64> -> tensor // CHECK: %[[C0:.*]] = arith.constant 0 : index @@ -126,27 +126,27 @@ func @torch.aten.Float.Tensor$zero_rank(%arg0: !torch.vtensor<[],f64>) -> !torch // CHECK: %[[EXT:.*]] = tensor.extract %[[F]][%[[ZERO]], %[[ZERO]]] : tensor // CHECK: %[[RET:.*]] = torch_c.from_f64 %[[EXT]] // CHECK: return %[[RET]] : !torch.float -func 
@torch.aten.Float.Tensor$non_zero_rank(%arg0: !torch.vtensor<[?,?],f64>) -> !torch.float { +func.func @torch.aten.Float.Tensor$non_zero_rank(%arg0: !torch.vtensor<[?,?],f64>) -> !torch.float { %0 = torch.aten.Float.Tensor %arg0 : !torch.vtensor<[?,?],f64> -> !torch.float return %0 : !torch.float } // ----- -// CHECK-LABEL: func @torch.aten.Bool.Tensor$zero_rank +// CHECK-LABEL: func.func @torch.aten.Bool.Tensor$zero_rank // CHECK-SAME: (%[[ARG:.*]]: !torch.vtensor<[],i1>) -> !torch.bool { // CHECK: %[[B:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[],i1> -> tensor // CHECK: %[[EXT:.*]] = tensor.extract %[[B]][] : tensor // CHECK: %[[RES:.*]] = torch_c.from_i1 %[[EXT]] // CHECK: return %[[RES]] : !torch.bool -func @torch.aten.Bool.Tensor$zero_rank(%arg0: !torch.vtensor<[],i1>) -> !torch.bool { +func.func @torch.aten.Bool.Tensor$zero_rank(%arg0: !torch.vtensor<[],i1>) -> !torch.bool { %0 = torch.aten.Bool.Tensor %arg0 : !torch.vtensor<[],i1> -> !torch.bool return %0 : !torch.bool } // ----- -// CHECK-LABEL: func @torch.aten.Bool.Tensor$non_zero_rank +// CHECK-LABEL: func.func @torch.aten.Bool.Tensor$non_zero_rank // CHECK-SAME: (%[[ARG:.*]]: !torch.vtensor<[?,?],i1>) -> !torch.bool { // CHECK: %[[B:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],i1> -> tensor // CHECK: %[[C0:.*]] = arith.constant 0 : index @@ -164,59 +164,59 @@ func @torch.aten.Bool.Tensor$zero_rank(%arg0: !torch.vtensor<[],i1>) -> !torch.b // CHECK: %[[EXT:.*]] = tensor.extract %[[I]][%[[ZERO]], %[[ZERO]]] : tensor // CHECK: %[[RET:.*]] = torch_c.from_i1 %[[EXT]] // CHECK: return %[[RET]] : !torch.bool -func @torch.aten.Bool.Tensor$non_zero_rank(%arg0: !torch.vtensor<[?,?],i1>) -> !torch.bool { +func.func @torch.aten.Bool.Tensor$non_zero_rank(%arg0: !torch.vtensor<[?,?],i1>) -> !torch.bool { %0 = torch.aten.Bool.Tensor %arg0 : !torch.vtensor<[?,?],i1> -> !torch.bool return %0 : !torch.bool } // ----- -// CHECK: func @torch.prim.NumToTensor.Scalar$basic(%[[IN:.*]]: !torch.int) -> !torch.vtensor<[],si64> { +// CHECK: func.func @torch.prim.NumToTensor.Scalar$basic(%[[IN:.*]]: !torch.int) -> !torch.vtensor<[],si64> { // CHECK: %[[INI64:.*]] = torch_c.to_i64 %[[IN]] // CHECK: %[[NEWVEC:.*]] = linalg.init_tensor [] : tensor // CHECK: %[[FILLVEC:.*]] = linalg.fill ins(%[[INI64]] : i64) outs(%[[NEWVEC]] : tensor) -> tensor // CHECK: %[[OUTVEC:.*]] = torch_c.from_builtin_tensor %[[FILLVEC]] : tensor -> !torch.vtensor<[],si64> // CHECK: return %[[OUTVEC]] : !torch.vtensor<[],si64> -func @torch.prim.NumToTensor.Scalar$basic(%arg0: !torch.int) -> !torch.vtensor<[],si64> { +func.func @torch.prim.NumToTensor.Scalar$basic(%arg0: !torch.int) -> !torch.vtensor<[],si64> { %0 = torch.prim.NumToTensor.Scalar %arg0 : !torch.int -> !torch.vtensor<[],si64> return %0 : !torch.vtensor<[],si64> } // ----- -// CHECK-LABEL: func @torch.tensor_static_info_cast$basic( +// CHECK-LABEL: func.func @torch.tensor_static_info_cast$basic( // CHECK-SAME: %[[VALUE_T:.*]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[4],f32> { // CHECK: %[[T:.*]] = torch_c.to_builtin_tensor %[[VALUE_T]] : !torch.vtensor<[?],f32> -> tensor // CHECK: %[[T_CAST:.*]] = tensor.cast %[[T]] : tensor to tensor<4xf32> // CHECK: %[[VALUE_T_CAST:.*]] = torch_c.from_builtin_tensor %[[T_CAST]] : tensor<4xf32> -> !torch.vtensor<[4],f32> // CHECK: return %[[VALUE_T_CAST]] : !torch.vtensor<[4],f32> -func @torch.tensor_static_info_cast$basic(%t: !torch.vtensor<[?],f32>) -> !torch.vtensor<[4],f32> { +func.func @torch.tensor_static_info_cast$basic(%t: 
!torch.vtensor<[?],f32>) -> !torch.vtensor<[4],f32> { %t_cast = torch.tensor_static_info_cast %t : !torch.vtensor<[?],f32> to !torch.vtensor<[4],f32> return %t_cast : !torch.vtensor<[4],f32> } // ----- -// CHECK-LABEL: func @torch.aten.neg +// CHECK-LABEL: func.func @torch.aten.neg // CHECK: linalg.generic {{.*}} { // CHECK-NEXT: ^bb0(%[[LHS:.*]]: f32, %{{.*}}: f32): // CHECK-NEXT: %[[NEG:.*]] = arith.negf %[[LHS]] : f32 // CHECK-NEXT: linalg.yield %[[NEG]] : f32 // CHECK-NEXT: } -> tensor -func @torch.aten.neg(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.neg(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.neg %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.neg.bf16 +// CHECK-LABEL: func.func @torch.aten.neg.bf16 // CHECK: linalg.generic {{.*}} { // CHECK-NEXT: ^bb0(%[[LHS:.*]]: bf16, %{{.*}}: bf16): // CHECK-NEXT: %[[NEG:.*]] = arith.negf %[[LHS]] : bf16 // CHECK-NEXT: linalg.yield %[[NEG]] : bf16 // CHECK-NEXT: } -> tensor -func @torch.aten.neg.bf16(%arg0: !torch.vtensor<[?,?],bf16>) -> !torch.vtensor<[?,?],bf16> { +func.func @torch.aten.neg.bf16(%arg0: !torch.vtensor<[?,?],bf16>) -> !torch.vtensor<[?,?],bf16> { %0 = torch.aten.neg %arg0 : !torch.vtensor<[?,?],bf16> -> !torch.vtensor<[?,?],bf16> return %0 : !torch.vtensor<[?,?],bf16> } diff --git a/test/Conversion/TorchToLinalg/elementwise.mlir b/test/Conversion/TorchToLinalg/elementwise.mlir index 873bfb203..d8af82fd1 100644 --- a/test/Conversion/TorchToLinalg/elementwise.mlir +++ b/test/Conversion/TorchToLinalg/elementwise.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-opt <%s -convert-torch-to-linalg -split-input-file -mlir-print-local-scope -verify-diagnostics | FileCheck %s -// CHECK-LABEL: func @elementwise$unary( +// CHECK-LABEL: func.func @elementwise$unary( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[],f32> -> tensor // CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor [] : tensor @@ -14,12 +14,12 @@ // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[CASTED]] : tensor -> !torch.vtensor<[],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[],f32> // CHECK: } -func @elementwise$unary(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { +func.func @elementwise$unary(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { %0 = torch.aten.tanh %arg0 : !torch.vtensor<[],f32> -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } -// CHECK-LABEL: func @elementwise$binary( +// CHECK-LABEL: func.func @elementwise$binary( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[BUILTIN_ARG0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -41,23 +41,23 @@ func @elementwise$unary(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> // CHECK: %[[CASTED:.*]] = tensor.cast %[[GENERIC:.*]] : tensor to tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[CASTED]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @elementwise$binary(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @elementwise$binary(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?],f32>) -> 
!torch.vtensor<[?,?],f32> { %0 = torch.aten.mul.Tensor %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } -// CHECK-LABEL: func @elementwise$ternary( +// CHECK-LABEL: func.func @elementwise$ternary( // CHECK: linalg.generic {indexing_maps = [ // CHECK-SAME: affine_map<(d0, d1, d2) -> (d0, d1, d2)>, // CHECK-SAME: affine_map<(d0, d1, d2) -> (d1, d2)>, // CHECK-SAME: affine_map<(d0, d1, d2) -> (d2)>, // CHECK-SAME: affine_map<(d0, d1, d2) -> (d0, d1, d2)>] -func @elementwise$ternary(%arg0: !torch.vtensor<[?,?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>, %arg2: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,?,?],f32> { +func.func @elementwise$ternary(%arg0: !torch.vtensor<[?,?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>, %arg2: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,?,?],f32> { %0 = torch.aten.lerp.Tensor %arg0, %arg1, %arg2 : !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?],f32>, !torch.vtensor<[?],f32> -> !torch.vtensor<[?,?,?],f32> return %0 : !torch.vtensor<[?,?,?],f32> } -// CHECK-LABEL: func @elementwise$with_scalar_capture( +// CHECK-LABEL: func.func @elementwise$with_scalar_capture( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],f32> { // CHECK: %[[C1:.*]] = torch.constant.int 1 @@ -69,18 +69,18 @@ func @elementwise$ternary(%arg0: !torch.vtensor<[?,?,?],f32>, %arg1: !torch.vten // CHECK: %[[RES:.*]] = arith.addf %[[LHS]], %[[SCALED]] : f32 // CHECK: linalg.yield %[[RES]] : f32 // CHECK: } -> tensor -func @elementwise$with_scalar_capture(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],f32> { +func.func @elementwise$with_scalar_capture(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],f32> { %int1 = torch.constant.int 1 %0 = torch.aten.add.Tensor %arg0, %arg1, %int1 : !torch.vtensor<[?],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[?],f32> return %0 : !torch.vtensor<[?],f32> } -// CHECK-LABEL: func @elementwise$static_1( +// CHECK-LABEL: func.func @elementwise$static_1( // CHECK: linalg.generic {indexing_maps = [ // CHECK-SAME: affine_map<(d0) -> (d0)>, // CHECK-SAME: affine_map<(d0) -> (0)>, // CHECK-SAME: affine_map<(d0) -> (d0)>] -func @elementwise$static_1(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[1],f32>) -> !torch.vtensor<[?],f32> { +func.func @elementwise$static_1(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[1],f32>) -> !torch.vtensor<[?],f32> { %1 = torch.aten.mul.Tensor %arg0, %arg1 : !torch.vtensor<[?],f32>, !torch.vtensor<[1],f32> -> !torch.vtensor<[?],f32> return %1 : !torch.vtensor<[?],f32> } diff --git a/test/Conversion/TorchToLinalg/flatten.mlir b/test/Conversion/TorchToLinalg/flatten.mlir index 196d1bd73..a2648e2b1 100644 --- a/test/Conversion/TorchToLinalg/flatten.mlir +++ b/test/Conversion/TorchToLinalg/flatten.mlir @@ -2,7 +2,7 @@ // ----- -// CHECK-LABEL: func @torch.aten.flatten.using_ints$basic( +// CHECK-LABEL: func.func @torch.aten.flatten.using_ints$basic( // CHECK-SAME: %[[TENSOR:.*]]: !torch.vtensor<[3,3,2,2,3,3,5],f32>) -> !torch.vtensor<[3,3,?,3,5],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[TENSOR]] : !torch.vtensor<[3,3,2,2,3,3,5],f32> -> tensor<3x3x2x2x3x3x5xf32> // CHECK: %[[COLLAPSED:.*]] = tensor.collapse_shape %[[BUILTIN_TENSOR]] {{\[\[}}0], [1], [2, 3, 4], [5], [6]] : tensor<3x3x2x2x3x3x5xf32> into tensor<3x3x12x3x5xf32> @@ -10,7 +10,7 @@ // CHECK: 
%[[RESULT:.*]] = torch_c.from_builtin_tensor %[[DYNAMIC]] : tensor<3x3x?x3x5xf32> -> !torch.vtensor<[3,3,?,3,5],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[3,3,?,3,5],f32> -func @torch.aten.flatten.using_ints$basic(%arg0: !torch.vtensor<[3,3,2,2,3,3,5],f32>) -> !torch.vtensor<[3,3,?,3,5],f32> { +func.func @torch.aten.flatten.using_ints$basic(%arg0: !torch.vtensor<[3,3,2,2,3,3,5],f32>) -> !torch.vtensor<[3,3,?,3,5],f32> { %int2 = torch.constant.int 2 %int4 = torch.constant.int 4 %0 = torch.aten.flatten.using_ints %arg0, %int2, %int4 : !torch.vtensor<[3,3,2,2,3,3,5],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,3,?,3,5],f32> @@ -19,7 +19,7 @@ func @torch.aten.flatten.using_ints$basic(%arg0: !torch.vtensor<[3,3,2,2,3,3,5], // ----- -// CHECK-LABEL: func @torch.aten.flatten.using_ints$basic_negative( +// CHECK-LABEL: func.func @torch.aten.flatten.using_ints$basic_negative( // CHECK-SAME: %[[TENSOR:.*]]: !torch.vtensor<[3,3,2,2,3,3,5],f32>) -> !torch.vtensor<[3,3,?,3,5],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[TENSOR]] : !torch.vtensor<[3,3,2,2,3,3,5],f32> -> tensor<3x3x2x2x3x3x5xf32> // CHECK: %[[COLLAPSED:.*]] = tensor.collapse_shape %[[BUILTIN_TENSOR]] {{\[\[}}0], [1], [2, 3, 4], [5], [6]] : tensor<3x3x2x2x3x3x5xf32> into tensor<3x3x12x3x5xf32> @@ -27,7 +27,7 @@ func @torch.aten.flatten.using_ints$basic(%arg0: !torch.vtensor<[3,3,2,2,3,3,5], // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[DYNAMIC]] : tensor<3x3x?x3x5xf32> -> !torch.vtensor<[3,3,?,3,5],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[3,3,?,3,5],f32> -func @torch.aten.flatten.using_ints$basic_negative(%arg0: !torch.vtensor<[3,3,2,2,3,3,5],f32>) -> !torch.vtensor<[3,3,?,3,5],f32> { +func.func @torch.aten.flatten.using_ints$basic_negative(%arg0: !torch.vtensor<[3,3,2,2,3,3,5],f32>) -> !torch.vtensor<[3,3,?,3,5],f32> { %int-5 = torch.constant.int -5 %int-3 = torch.constant.int -3 %0 = torch.aten.flatten.using_ints %arg0, %int-5, %int-3 : !torch.vtensor<[3,3,2,2,3,3,5],f32>, !torch.int, !torch.int -> !torch.vtensor<[3,3,?,3,5],f32> @@ -36,7 +36,7 @@ func @torch.aten.flatten.using_ints$basic_negative(%arg0: !torch.vtensor<[3,3,2, // ----- -// CHECK-LABEL: func @torch.aten.flatten.using_ints$flatten_front( +// CHECK-LABEL: func.func @torch.aten.flatten.using_ints$flatten_front( // CHECK-SAME: %[[TENSOR:.*]]: !torch.vtensor<[3,3,2,2],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[TENSOR]] : !torch.vtensor<[3,3,2,2],f32> -> tensor<3x3x2x2xf32> // CHECK: %[[COLLAPSED:.*]] = tensor.collapse_shape %[[BUILTIN_TENSOR]] {{\[\[}}0, 1, 2], [3]] : tensor<3x3x2x2xf32> into tensor<18x2xf32> @@ -44,7 +44,7 @@ func @torch.aten.flatten.using_ints$basic_negative(%arg0: !torch.vtensor<[3,3,2, // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[DYNAMIC]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.flatten.using_ints$flatten_front(%arg0: !torch.vtensor<[3,3,2,2],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.flatten.using_ints$flatten_front(%arg0: !torch.vtensor<[3,3,2,2],f32>) -> !torch.vtensor<[?,?],f32> { %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 %0 = torch.aten.flatten.using_ints %arg0, %int0, %int2 : !torch.vtensor<[3,3,2,2],f32>, !torch.int, !torch.int -> !torch.vtensor<[?,?],f32> @@ -53,7 +53,7 @@ func @torch.aten.flatten.using_ints$flatten_front(%arg0: !torch.vtensor<[3,3,2,2 // ----- -// CHECK-LABEL: func 
@torch.aten.flatten.using_ints$flatten_back( +// CHECK-LABEL: func.func @torch.aten.flatten.using_ints$flatten_back( // CHECK-SAME: %[[TENSOR:.*]]: !torch.vtensor<[3,3,2,2],f32>) -> !torch.vtensor<[?,12],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[TENSOR]] : !torch.vtensor<[3,3,2,2],f32> -> tensor<3x3x2x2xf32> // CHECK: %[[COLLAPSED:.*]] = tensor.collapse_shape %[[BUILTIN_TENSOR]] {{\[\[}}0], [1, 2, 3]] : tensor<3x3x2x2xf32> into tensor<3x12xf32> @@ -61,7 +61,7 @@ func @torch.aten.flatten.using_ints$flatten_front(%arg0: !torch.vtensor<[3,3,2,2 // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[DYNAMIC]] : tensor -> !torch.vtensor<[?,12],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,12],f32> -func @torch.aten.flatten.using_ints$flatten_back(%arg0: !torch.vtensor<[3,3,2,2],f32>) -> !torch.vtensor<[?,12],f32> { +func.func @torch.aten.flatten.using_ints$flatten_back(%arg0: !torch.vtensor<[3,3,2,2],f32>) -> !torch.vtensor<[?,12],f32> { %int1 = torch.constant.int 1 %int-1 = torch.constant.int -1 %0 = torch.aten.flatten.using_ints %arg0, %int1, %int-1 : !torch.vtensor<[3,3,2,2],f32>, !torch.int, !torch.int -> !torch.vtensor<[?,12],f32> @@ -70,14 +70,14 @@ func @torch.aten.flatten.using_ints$flatten_back(%arg0: !torch.vtensor<[3,3,2,2] // ----- -// CHECK-LABEL: func @torch.aten.flatten.using_ints$rank0( +// CHECK-LABEL: func.func @torch.aten.flatten.using_ints$rank0( // CHECK-SAME: %[[TENSOR:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[TENSOR]] : !torch.vtensor<[],f32> -> tensor // CHECK: %[[COLLAPSED:.*]] = tensor.expand_shape %[[BUILTIN_TENSOR]] [] : tensor into tensor<1xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[COLLAPSED]] : tensor<1xf32> -> !torch.vtensor<[1],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[1],f32> -func @torch.aten.flatten.using_ints$rank0(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { +func.func @torch.aten.flatten.using_ints$rank0(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { %int0 = torch.constant.int 0 %0 = torch.aten.flatten.using_ints %arg0, %int0, %int0 : !torch.vtensor<[],f32>, !torch.int, !torch.int -> !torch.vtensor<[1],f32> return %0 : !torch.vtensor<[1],f32> diff --git a/test/Conversion/TorchToLinalg/unsqueeze.mlir b/test/Conversion/TorchToLinalg/unsqueeze.mlir index ffb05005e..44e028e9d 100644 --- a/test/Conversion/TorchToLinalg/unsqueeze.mlir +++ b/test/Conversion/TorchToLinalg/unsqueeze.mlir @@ -3,61 +3,61 @@ // ----- -// CHECK-LABEL: func @torch.aten.unsqueeze$basic( +// CHECK-LABEL: func.func @torch.aten.unsqueeze$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[],f32> -> tensor // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[BUILTIN_TENSOR]] [] : tensor into tensor<1xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[EXPANDED]] : tensor<1xf32> -> !torch.vtensor<[1],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[1],f32> -func @torch.aten.unsqueeze$basic(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { +func.func @torch.aten.unsqueeze$basic(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { %int0 = torch.constant.int 0 %0 = torch.aten.unsqueeze %arg0, %int0 : !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1],f32> return %0 : !torch.vtensor<[1],f32> } -// CHECK-LABEL: func @torch.aten.unsqueeze$basic_negative( +// 
CHECK-LABEL: func.func @torch.aten.unsqueeze$basic_negative( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[],f32> -> tensor // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[BUILTIN_TENSOR]] [] : tensor into tensor<1xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[EXPANDED]] : tensor<1xf32> -> !torch.vtensor<[1],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[1],f32> -func @torch.aten.unsqueeze$basic_negative(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { +func.func @torch.aten.unsqueeze$basic_negative(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> { %int-1 = torch.constant.int -1 %0 = torch.aten.unsqueeze %arg0, %int-1 : !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1],f32> return %0 : !torch.vtensor<[1],f32> } -// CHECK-LABEL: func @torch.aten.unsqueeze$higher_rank_front( +// CHECK-LABEL: func.func @torch.aten.unsqueeze$higher_rank_front( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[1,2,3,4],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[2,3,4],f32> -> tensor<2x3x4xf32> // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[BUILTIN_TENSOR]] {{\[\[}}0, 1], [2], [3]] : tensor<2x3x4xf32> into tensor<1x2x3x4xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[EXPANDED]] : tensor<1x2x3x4xf32> -> !torch.vtensor<[1,2,3,4],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[1,2,3,4],f32> -func @torch.aten.unsqueeze$higher_rank_front(%arg0: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[1,2,3,4],f32> { +func.func @torch.aten.unsqueeze$higher_rank_front(%arg0: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[1,2,3,4],f32> { %int0 = torch.constant.int 0 %0 = torch.aten.unsqueeze %arg0, %int0 : !torch.vtensor<[2,3,4],f32>, !torch.int -> !torch.vtensor<[1,2,3,4],f32> return %0 : !torch.vtensor<[1,2,3,4],f32> } -// CHECK-LABEL: func @torch.aten.unsqueeze$higher_rank_back( +// CHECK-LABEL: func.func @torch.aten.unsqueeze$higher_rank_back( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,4,1],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[2,3,4],f32> -> tensor<2x3x4xf32> // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[BUILTIN_TENSOR]] {{\[\[}}0], [1], [2, 3]] : tensor<2x3x4xf32> into tensor<2x3x4x1xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[EXPANDED]] : tensor<2x3x4x1xf32> -> !torch.vtensor<[2,3,4,1],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[2,3,4,1],f32> -func @torch.aten.unsqueeze$higher_rank_back(%arg0: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,4,1],f32> { +func.func @torch.aten.unsqueeze$higher_rank_back(%arg0: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,4,1],f32> { %int-1 = torch.constant.int -1 %0 = torch.aten.unsqueeze %arg0, %int-1 : !torch.vtensor<[2,3,4],f32>, !torch.int -> !torch.vtensor<[2,3,4,1],f32> return %0 : !torch.vtensor<[2,3,4,1],f32> } -// CHECK-LABEL: func @torch.aten.unsqueeze$higher_rank_middle( +// CHECK-LABEL: func.func @torch.aten.unsqueeze$higher_rank_middle( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1,4],f32> { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[2,3,4],f32> -> tensor<2x3x4xf32> // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[BUILTIN_TENSOR]] {{\[\[}}0], [1], [2, 3]] : tensor<2x3x4xf32> into 
tensor<2x3x1x4xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[EXPANDED]] : tensor<2x3x1x4xf32> -> !torch.vtensor<[2,3,1,4],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[2,3,1,4],f32> -func @torch.aten.unsqueeze$higher_rank_middle(%arg0: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1,4],f32> { +func.func @torch.aten.unsqueeze$higher_rank_middle(%arg0: !torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1,4],f32> { %int2 = torch.constant.int 2 %0 = torch.aten.unsqueeze %arg0, %int2 : !torch.vtensor<[2,3,4],f32>, !torch.int -> !torch.vtensor<[2,3,1,4],f32> return %0 : !torch.vtensor<[2,3,1,4],f32> diff --git a/test/Conversion/TorchToSCF/basic.mlir b/test/Conversion/TorchToSCF/basic.mlir index 86c20368a..fadac3b4f 100644 --- a/test/Conversion/TorchToSCF/basic.mlir +++ b/test/Conversion/TorchToSCF/basic.mlir @@ -1,6 +1,6 @@ // RUN: torch-mlir-opt <%s -convert-torch-to-scf | FileCheck %s -// CHECK-LABEL: func @torch.prim.if( +// CHECK-LABEL: func.func @torch.prim.if( // CHECK-SAME: %[[VAL_0:.*]]: !torch.bool) -> !torch.int { // CHECK: %[[VAL_1:.*]] = torch_c.to_i1 %[[VAL_0]] // CHECK: %[[VAL_2:.*]] = torch.constant.int 2 @@ -14,7 +14,7 @@ // CHECK: } // CHECK: %[[VAL_7:.*]] = torch_c.from_i64 %[[VAL_8:.*]] // CHECK: return %[[VAL_7]] : !torch.int -func @torch.prim.if(%arg0: !torch.bool) -> !torch.int { +func.func @torch.prim.if(%arg0: !torch.bool) -> !torch.int { %int2 = torch.constant.int 2 %int1 = torch.constant.int 1 %0 = torch.prim.If %arg0 -> (!torch.int) { @@ -25,7 +25,7 @@ func @torch.prim.if(%arg0: !torch.bool) -> !torch.int { return %0 : !torch.int } -// CHECK-LABEL: func @aten.prim.if$nested( +// CHECK-LABEL: func.func @aten.prim.if$nested( // CHECK-SAME: %[[VAL_0:.*]]: !torch.bool, // CHECK-SAME: %[[VAL_1:.*]]: !torch.bool) -> !torch.int { // CHECK: %[[VAL_2:.*]] = torch_c.to_i1 %[[VAL_0]] @@ -48,7 +48,7 @@ func @torch.prim.if(%arg0: !torch.bool) -> !torch.int { // CHECK: } // CHECK: %[[VAL_13:.*]] = torch_c.from_i64 %[[VAL_14:.*]] // CHECK: return %[[VAL_13]] : !torch.int -func @aten.prim.if$nested(%arg0: !torch.bool, %arg1: !torch.bool) -> !torch.int { +func.func @aten.prim.if$nested(%arg0: !torch.bool, %arg1: !torch.bool) -> !torch.int { %int2 = torch.constant.int 2 %int3 = torch.constant.int 3 %int4 = torch.constant.int 4 @@ -65,7 +65,7 @@ func @aten.prim.if$nested(%arg0: !torch.bool, %arg1: !torch.bool) -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.prim.loop$while +// CHECK-LABEL: func.func @torch.prim.loop$while // CHECK-SAME: (%[[ARG0:.*]]: !torch.int) -> !torch.float { // CHECK: %[[TORCH_FLOAT_VAL:.*]] = torch.constant.float // CHECK-NEXT: %[[FLOAT_VAL:.*]] = torch_c.to_f64 %[[TORCH_FLOAT_VAL]] @@ -86,7 +86,7 @@ func @aten.prim.if$nested(%arg0: !torch.bool, %arg1: !torch.bool) -> !torch.int // CHECK-NEXT: } // CHECK-NEXT: %[[TORCH_LOOP:.*]] = torch_c.from_f64 %[[LOOP]] // CHECK-NEXT: return %[[TORCH_LOOP]] : !torch.float -func @torch.prim.loop$while(%arg0: !torch.int) -> !torch.float { +func.func @torch.prim.loop$while(%arg0: !torch.int) -> !torch.float { %float3.200000e00 = torch.constant.float 3.200000e+00 %int9223372036854775807 = torch.constant.int 9223372036854775807 %0 = torch.aten.lt.float_int %float3.200000e00, %arg0 : !torch.float, !torch.int -> !torch.bool @@ -99,7 +99,7 @@ func @torch.prim.loop$while(%arg0: !torch.int) -> !torch.float { return %1 : !torch.float } -// CHECK-LABEL: func @torch.prim.loop$while_with_multiple_values +// CHECK-LABEL: func.func @torch.prim.loop$while_with_multiple_values // CHECK-SAME: () 
-> (!torch.float, !torch.float) { // CHECK: %[[TORCH_FLOAT_VAL_0:.*]] = torch.constant.float // CHECK-NEXT: %[[FLOAT_VAL_0:.*]] = torch_c.to_f64 %[[TORCH_FLOAT_VAL_0]] @@ -127,7 +127,7 @@ func @torch.prim.loop$while(%arg0: !torch.int) -> !torch.float { // CHECK-NEXT: %[[TORCH_LOOP_0:.*]] = torch_c.from_f64 %[[LOOP]]#0 // CHECK-NEXT: %[[TORCH_LOOP_1:.*]] = torch_c.from_f64 %[[LOOP]]#1 // CHECK-NEXT: return %[[TORCH_LOOP_0]], %[[TORCH_LOOP_1]] : !torch.float, !torch.float -func @torch.prim.loop$while_with_multiple_values() -> (!torch.float, !torch.float) { +func.func @torch.prim.loop$while_with_multiple_values() -> (!torch.float, !torch.float) { %float3.200000e00 = torch.constant.float 3.200000e+00 %int9223372036854775807 = torch.constant.int 9223372036854775807 %float9.0 = torch.constant.float 9.0 @@ -143,7 +143,7 @@ func @torch.prim.loop$while_with_multiple_values() -> (!torch.float, !torch.floa return %1#0, %1#1 : !torch.float, !torch.float } -// CHECK-LABEL: func @torch.prim.Loop$for +// CHECK-LABEL: func.func @torch.prim.Loop$for // CHECK-SAME: (%[[TORCH_ARG0:.*]]: !torch.int) -> !torch.float { // CHECK: %[[ARG0:.*]] = torch_c.to_i64 %[[TORCH_ARG0]] // CHECK-NEXT: %{{.*}} = torch.constant.bool true @@ -164,7 +164,7 @@ func @torch.prim.loop$while_with_multiple_values() -> (!torch.float, !torch.floa // CHECK-NEXT: %[[RETURN:.*]] = torch_c.from_f64 %[[LOOP]] // CHECK-NEXT: return %[[RETURN]] : !torch.float // CHECK-NEXT: } -func @torch.prim.Loop$for(%arg0: !torch.int) -> !torch.float { +func.func @torch.prim.Loop$for(%arg0: !torch.int) -> !torch.float { %true = torch.constant.bool true %float0.000000e00 = torch.constant.float 0.000000e+00 %0 = torch.prim.Loop %arg0, %true, init(%float0.000000e00) { @@ -175,7 +175,7 @@ func @torch.prim.Loop$for(%arg0: !torch.int) -> !torch.float { return %0 : !torch.float } -// CHECK-LABEL: func @torch.prim.Loop$for_with_multiple_results +// CHECK-LABEL: func.func @torch.prim.Loop$for_with_multiple_results // CHECK-SAME: (%[[TORCH_ARG0:.*]]: !torch.int) -> (!torch.float, !torch.float) { // CHECK: %[[ARG0:.*]] = torch_c.to_i64 %[[TORCH_ARG0]] // CHECK-NEXT: %{{.*}} = torch.constant.bool true @@ -202,7 +202,7 @@ func @torch.prim.Loop$for(%arg0: !torch.int) -> !torch.float { // CHECK-NEXT: %[[RETURN_1:.*]] = torch_c.from_f64 %[[LOOP]]#1 // CHECK-NEXT: return %[[RETURN_0]], %[[RETURN_1]] : !torch.float, !torch.float // CHECK-NEXT: } -func @torch.prim.Loop$for_with_multiple_results(%arg0: !torch.int) -> (!torch.float, !torch.float) { +func.func @torch.prim.Loop$for_with_multiple_results(%arg0: !torch.int) -> (!torch.float, !torch.float) { %true = torch.constant.bool true %float0.000000e00 = torch.constant.float 0.000000e+00 %float9.0 = torch.constant.float 9.0 diff --git a/test/Conversion/TorchToStd/basic.mlir b/test/Conversion/TorchToStd/basic.mlir index f735972cc..27141b01a 100644 --- a/test/Conversion/TorchToStd/basic.mlir +++ b/test/Conversion/TorchToStd/basic.mlir @@ -1,19 +1,19 @@ // RUN: torch-mlir-opt <%s -convert-torch-to-std | FileCheck %s -// CHECK-LABEL: func @torch.aten.dim( +// CHECK-LABEL: func.func @torch.aten.dim( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<*,f32>) -> !torch.int { // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<*,f32> -> tensor<*xf32> // CHECK: %[[RANK:.*]] = tensor.rank %[[BUILTIN_TENSOR]] : tensor<*xf32> // CHECK: %[[RANK_I64:.*]] = arith.index_cast %[[RANK]] : index to i64 // CHECK: %[[RANK_TORCH_INT:.*]] = torch_c.from_i64 %[[RANK_I64]] // CHECK: return %[[RANK_TORCH_INT]] : !torch.int 
-func @torch.aten.dim(%arg0: !torch.vtensor<*,f32>) -> !torch.int { +func.func @torch.aten.dim(%arg0: !torch.vtensor<*,f32>) -> !torch.int { %0 = torch.aten.dim %arg0 : !torch.vtensor<*,f32> -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.runtime.assert( +// CHECK-LABEL: func.func @torch.runtime.assert( // CHECK-SAME: %[[X:.*]]: !torch.int, // CHECK-SAME: %[[Y:.*]]: !torch.int) { // CHECK: %[[X_I64:.*]] = torch_c.to_i64 %[[X]] @@ -21,13 +21,13 @@ func @torch.aten.dim(%arg0: !torch.vtensor<*,f32>) -> !torch.int { // CHECK: %[[CMP:.*]] = arith.cmpi ne, %[[X_I64]], %[[Y_I64]] : i64 // CHECK: assert %[[CMP]], "x must not be equal to y" // CHECK: return -func @torch.runtime.assert(%arg0: !torch.int, %arg1: !torch.int) { +func.func @torch.runtime.assert(%arg0: !torch.int, %arg1: !torch.int) { %0 = torch.aten.ne.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.bool torch.runtime.assert %0, "x must not be equal to y" return } -// CHECK-LABEL: func @torch.aten.ne.int( +// CHECK-LABEL: func.func @torch.aten.ne.int( // CHECK-SAME: %[[LHS:.*]]: !torch.int, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]] @@ -35,12 +35,12 @@ func @torch.runtime.assert(%arg0: !torch.int, %arg1: !torch.int) { // CHECK: %[[CMP:.*]] = arith.cmpi ne, %[[LHS_I64]], %[[RHS_I64]] : i64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.ne.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { +func.func @torch.aten.ne.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { %0 = torch.aten.ne.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int( +// CHECK-LABEL: func.func @torch.aten.eq.int( // CHECK-SAME: %[[LHS:.*]]: !torch.int, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]] @@ -48,12 +48,12 @@ func @torch.aten.ne.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { // CHECK: %[[CMP:.*]] = arith.cmpi eq, %[[LHS_I64]], %[[RHS_I64]] : i64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.eq.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { +func.func @torch.aten.eq.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { %0 = torch.aten.eq.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.gt.int( +// CHECK-LABEL: func.func @torch.aten.gt.int( // CHECK-SAME: %[[LHS:.*]]: !torch.int, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]] @@ -61,48 +61,48 @@ func @torch.aten.eq.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { // CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[LHS_I64]], %[[RHS_I64]] : i64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.gt.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { +func.func @torch.aten.gt.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { %0 = torch.aten.gt.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.vtensor.literal() -> !torch.vtensor<[],f32> { +// CHECK-LABEL: func.func @torch.vtensor.literal() -> !torch.vtensor<[],f32> { // CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : tensor // CHECK: %[[VTENSOR:.*]] 
= torch_c.from_builtin_tensor %[[CST]] : tensor -> !torch.vtensor<[],f32> // CHECK: return %[[VTENSOR]] : !torch.vtensor<[],f32> -func @torch.vtensor.literal() -> !torch.vtensor<[],f32> { +func.func @torch.vtensor.literal() -> !torch.vtensor<[],f32> { %0 = torch.vtensor.literal(dense<0.0> : tensor) : !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } -// CHECK-LABEL: func @torch.constant.bool() -> !torch.bool { +// CHECK-LABEL: func.func @torch.constant.bool() -> !torch.bool { // CHECK: %[[CST:.*]] = arith.constant true // CHECK: %[[BOOL:.*]] = torch_c.from_i1 %[[CST]] // CHECK: return %[[BOOL]] : !torch.bool -func @torch.constant.bool() -> !torch.bool { +func.func @torch.constant.bool() -> !torch.bool { %true = torch.constant.bool true return %true : !torch.bool } -// CHECK-LABEL: func @torch.constant.float() -> !torch.float { +// CHECK-LABEL: func.func @torch.constant.float() -> !torch.float { // CHECK: %[[CST:.*]] = arith.constant 1.000000e+00 : f64 // CHECK: %[[FLOAT:.*]] = torch_c.from_f64 %[[CST]] // CHECK: return %[[FLOAT]] : !torch.float -func @torch.constant.float() -> !torch.float { +func.func @torch.constant.float() -> !torch.float { %float = torch.constant.float 1.000000e+00 return %float : !torch.float } -// CHECK-LABEL: func @torch.constant.int() -> !torch.int { +// CHECK-LABEL: func.func @torch.constant.int() -> !torch.int { // CHECK: %[[CST:.*]] = arith.constant 1 : i64 // CHECK: %[[INT:.*]] = torch_c.from_i64 %[[CST]] // CHECK: return %[[INT]] : !torch.int -func @torch.constant.int() -> !torch.int { +func.func @torch.constant.int() -> !torch.int { %int1 = torch.constant.int 1 return %int1 : !torch.int } -// CHECK-LABEL: func @torch.aten.add.int( +// CHECK-LABEL: func.func @torch.aten.add.int( // CHECK-SAME: %[[LHS:.*]]: !torch.int, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.int { // CHECK: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]] @@ -110,12 +110,12 @@ func @torch.constant.int() -> !torch.int { // CHECK: %[[ADD:.*]] = arith.addi %[[LHS_I64:.*]], [[RHS_I64:.*]] : i64 // CHECK: %[[OUT:.*]] = torch_c.from_i64 %[[INT:.*]] // CHECK: return %[[OUT:.*]] : !torch.int -func @torch.aten.add.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { +func.func @torch.aten.add.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { %0 = torch.aten.add.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.aten.sub.int( +// CHECK-LABEL: func.func @torch.aten.sub.int( // CHECK-SAME: %[[LHS:.*]]: !torch.int, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.int { // CHECK: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]] @@ -123,12 +123,12 @@ func @torch.aten.add.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { // CHECK: %[[SUB:.*]] = arith.subi %[[LHS_I64:.*]], [[RHS_I64:.*]] : i64 // CHECK: %[[OUT:.*]] = torch_c.from_i64 %[[INT:.*]] // CHECK: return %[[OUT:.*]] : !torch.int -func @torch.aten.sub.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { +func.func @torch.aten.sub.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { %0 = torch.aten.sub.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.aten.sub.float( +// CHECK-LABEL: func.func @torch.aten.sub.float( // CHECK-SAME: %[[LHS:.*]]: !torch.float, // CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.float { // CHECK: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]] @@ -136,12 +136,12 @@ func @torch.aten.sub.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { // CHECK: %[[SUB:.*]] = arith.subf 
%[[LHS_F64:.*]], [[RHS_F64:.*]] : f64 // CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[SUB:.*]] // CHECK: return %[[OUT:.*]] : !torch.float -func @torch.aten.sub.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float { +func.func @torch.aten.sub.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float { %0 = torch.aten.sub.float %arg0, %arg1 : !torch.float, !torch.float -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @torch.aten.mul.int( +// CHECK-LABEL: func.func @torch.aten.mul.int( // CHECK-SAME: %[[LHS:.*]]: !torch.int, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.int { // CHECK: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]] @@ -149,12 +149,12 @@ func @torch.aten.sub.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.f // CHECK: %[[MUL:.*]] = arith.muli %[[LHS_I64:.*]], [[RHS_I64:.*]] : i64 // CHECK: %[[OUT:.*]] = torch_c.from_i64 %[[MUL:.*]] // CHECK: return %[[OUT:.*]] : !torch.int -func @torch.aten.mul.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { +func.func @torch.aten.mul.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { %0 = torch.aten.mul.int %arg0, %arg1 : !torch.int, !torch.int -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.aten.div.float( +// CHECK-LABEL: func.func @torch.aten.div.float( // CHECK-SAME: %[[LHS:.*]]: !torch.float, // CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.float { // CHECK: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]] @@ -162,12 +162,12 @@ func @torch.aten.mul.int(%arg0: !torch.int, %arg1: !torch.int) -> !torch.int { // CHECK: %[[SUB:.*]] = arith.divf %[[LHS_F64:.*]], [[RHS_F64:.*]] : f64 // CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[SUB:.*]] // CHECK: return %[[OUT:.*]] : !torch.float -func @torch.aten.div.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float { +func.func @torch.aten.div.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float { %0 = torch.aten.div.float %arg0, %arg1 : !torch.float, !torch.float -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @torch.aten.ge.float( +// CHECK-LABEL: func.func @torch.aten.ge.float( // CHECK-SAME: %[[LHS:.*]]: !torch.float, // CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.bool { // CHECK: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]] @@ -175,12 +175,12 @@ func @torch.aten.div.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.f // CHECK: %[[CMP:.*]] = arith.cmpf uge, %[[LHS_F64]], %[[RHS_F64]] : f64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.ge.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.bool { +func.func @torch.aten.ge.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.bool { %0 = torch.aten.ge.float %arg0, %arg1 : !torch.float, !torch.float -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ge.float_int( +// CHECK-LABEL: func.func @torch.aten.ge.float_int( // CHECK-SAME: %[[LHS:.*]]: !torch.float, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]] @@ -189,12 +189,12 @@ func @torch.aten.ge.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.bo // CHECK: %[[CMP:.*]] = arith.cmpf uge, %[[LHS_F64]], %[[RHS_F64]] : f64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.ge.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch.bool { +func.func @torch.aten.ge.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch.bool { 
%0 = torch.aten.ge.float_int %arg0, %arg1 : !torch.float, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ne.float_int( +// CHECK-LABEL: func.func @torch.aten.ne.float_int( // CHECK-SAME: %[[LHS:.*]]: !torch.float, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]] @@ -203,24 +203,24 @@ func @torch.aten.ge.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch. // CHECK: %[[CMP:.*]] = arith.cmpf une, %[[LHS_F64]], %[[RHS_F64]] : f64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.ne.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch.bool { +func.func @torch.aten.ne.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch.bool { %0 = torch.aten.ne.float_int %arg0, %arg1 : !torch.float, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ceil.float( +// CHECK-LABEL: func.func @torch.aten.ceil.float( // CHECK-SAME: %[[ARG:.*]]: !torch.float) -> !torch.int { // CHECK: %[[ARG_F64:.*]] = torch_c.to_f64 %[[ARG]] // CHECK: %[[CEIL:.*]] = math.ceil %[[ARG_F64]] : f64 // CHECK: %[[CEIL_I64:.*]] = arith.fptosi %[[CEIL]] : f64 to i64 // CHECK: %[[OUT:.*]] = torch_c.from_i64 %[[CEIL_I64]] // CHECK: return %[[OUT]] : !torch.int -func @torch.aten.ceil.float(%arg0: !torch.float) -> !torch.int { +func.func @torch.aten.ceil.float(%arg0: !torch.float) -> !torch.int { %0 = torch.aten.ceil.float %arg0 : !torch.float -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.aten.gt.float_int( +// CHECK-LABEL: func.func @torch.aten.gt.float_int( // CHECK-SAME: %[[LHS:.*]]: !torch.float, // CHECK-SAME: %[[RHS:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]] @@ -229,7 +229,7 @@ func @torch.aten.ceil.float(%arg0: !torch.float) -> !torch.int { // CHECK: %[[CMP:.*]] = arith.cmpf ugt, %[[LHS_F64]], %[[RHS_F64]] : f64 // CHECK: %[[CMP_TORCH_BOOL:.*]] = torch_c.from_i1 %[[CMP]] // CHECK: return %[[CMP_TORCH_BOOL]] : !torch.bool -func @torch.aten.gt.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch.bool { +func.func @torch.aten.gt.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !torch.bool { %0 = torch.aten.gt.float_int %arg0, %arg1 : !torch.float, !torch.int -> !torch.bool return %0 : !torch.bool } diff --git a/test/Conversion/TorchToTosa/basic.mlir b/test/Conversion/TorchToTosa/basic.mlir index 5ffc5cc91..7b4d0b7de 100644 --- a/test/Conversion/TorchToTosa/basic.mlir +++ b/test/Conversion/TorchToTosa/basic.mlir @@ -1,38 +1,38 @@ // RUN: torch-mlir-opt <%s -convert-torch-to-tosa -split-input-file -verify-diagnostics | FileCheck %s -// CHECK-LABEL: func @torch.aten.tanh$basic( +// CHECK-LABEL: func.func @torch.aten.tanh$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.tanh"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.tanh$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.tanh$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.tanh %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> 
return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.sigmoid$basic( +// CHECK-LABEL: func.func @torch.aten.sigmoid$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.sigmoid"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.sigmoid$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.sigmoid$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.sigmoid %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.relu$basic( +// CHECK-LABEL: func.func @torch.aten.relu$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.clamp"(%[[ARG_BUILTIN]]) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.relu$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.relu$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.relu %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } @@ -40,100 +40,100 @@ func @torch.aten.relu$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor< // ----- -// CHECK-LABEL: func @torch.aten.log$basic( +// CHECK-LABEL: func.func @torch.aten.log$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.log"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.log$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.log$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.log %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.exp$basic( +// CHECK-LABEL: func.func @torch.aten.exp$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.exp"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.exp$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func 
@torch.aten.exp$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.exp %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.neg$basic( +// CHECK-LABEL: func.func @torch.aten.neg$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.negate"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.neg$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.neg$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.neg %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.floor$basic( +// CHECK-LABEL: func.func @torch.aten.floor$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.floor"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.floor$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.floor$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.floor %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.bitwise_not$basic( +// CHECK-LABEL: func.func @torch.aten.bitwise_not$basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.bitwise_not"(%[[ARG_BUILTIN]]) : (tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.bitwise_not$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.bitwise_not$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.bitwise_not %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.ceil$basic( +// CHECK-LABEL: func.func @torch.aten.ceil$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = "tosa.ceil"(%[[VAL_1]]) : (tensor) -> tensor // CHECK: %[[VAL_3:.*]] = torch_c.from_builtin_tensor %[[VAL_2]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_3]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.ceil$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.ceil$basic(%arg0: 
!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.ceil %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.reciprocal$basic( +// CHECK-LABEL: func.func @torch.aten.reciprocal$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = "tosa.reciprocal"(%[[VAL_1]]) : (tensor) -> tensor // CHECK: %[[VAL_3:.*]] = torch_c.from_builtin_tensor %[[VAL_2]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_3]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.reciprocal$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.reciprocal$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.reciprocal %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.add$basic( +// CHECK-LABEL: func.func @torch.aten.add$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -145,7 +145,7 @@ func @torch.aten.reciprocal$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vt // CHECK: %[[VAL_8:.*]] = torch_c.from_builtin_tensor %[[VAL_7]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_8]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.add$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { +func.func @torch.aten.add$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { %int1 = torch.constant.int 1 %0 = torch.aten.add.Tensor %arg0, %arg1, %int1 : !torch.vtensor<[?, ?],f32>, !torch.vtensor<[?, ?],f32>, !torch.int -> !torch.vtensor<[?, ?],f32> return %0 : !torch.vtensor<[?, ?],f32> @@ -153,7 +153,7 @@ func @torch.aten.add$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vten // ----- -// CHECK-LABEL: func @torch.aten.sub$basic( +// CHECK-LABEL: func.func @torch.aten.sub$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -165,7 +165,7 @@ func @torch.aten.add$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vten // CHECK: %[[VAL_8:.*]] = torch_c.from_builtin_tensor %[[VAL_7]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_8]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.sub$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { +func.func @torch.aten.sub$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { %int1 = torch.constant.int 1 %0 = torch.aten.sub.Tensor %arg0, %arg1, %int1 : !torch.vtensor<[?, ?],f32>, !torch.vtensor<[?, ?],f32>, !torch.int -> !torch.vtensor<[?, ?],f32> return %0 : !torch.vtensor<[?, ?],f32> @@ -173,7 +173,7 @@ func @torch.aten.sub$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vten // ----- -// CHECK-LABEL: func @torch.aten.mul$basic( +// CHECK-LABEL: 
func.func @torch.aten.mul$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -181,14 +181,14 @@ func @torch.aten.sub$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vten // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.mul"(%[[ARG0_BUILTIN]], %[[ARG1_BUILTIN]]) {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.mul$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { +func.func @torch.aten.mul$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { %0 = torch.aten.mul.Tensor %arg0, %arg1 : !torch.vtensor<[?, ?],f32>, !torch.vtensor<[?, ?],f32> -> !torch.vtensor<[?, ?],f32> return %0 : !torch.vtensor<[?, ?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.div$basic( +// CHECK-LABEL: func.func @torch.aten.div$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -197,14 +197,14 @@ func @torch.aten.mul$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vten // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.mul"(%[[ARG0_BUILTIN]], %[[RCP]]) {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.div$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { +func.func @torch.aten.div$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vtensor<[?, ?],f32>) -> !torch.vtensor<[?, ?],f32> { %0 = torch.aten.div.Tensor %arg0, %arg1 : !torch.vtensor<[?, ?],f32>, !torch.vtensor<[?, ?],f32> -> !torch.vtensor<[?, ?],f32> return %0 : !torch.vtensor<[?, ?],f32> } // ----- -// CHECK-LABEL: func @test_reduce_mean_dim$basic( +// CHECK-LABEL: func.func @test_reduce_mean_dim$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?,?,?],f32> -> tensor // CHECK: %[[ARG1:.*]] = torch.constant.int 0 @@ -217,7 +217,7 @@ func @torch.aten.div$basic(%arg0: !torch.vtensor<[?, ?],f32>, %arg1: !torch.vten // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.mul"(%[[RESHAPE_SUM]], %[[CONST]]) {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?,?],f32> -func @test_reduce_mean_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { +func.func @test_reduce_mean_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { %dim0 = torch.constant.int 0 %reducedims = torch.prim.ListConstruct %dim0 : (!torch.int) -> !torch.list %keepdims = torch.constant.bool false @@ -228,7 +228,7 @@ func @test_reduce_mean_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch // ----- -// CHECK-LABEL: func 
@test_reduce_sum_dims$basic( +// CHECK-LABEL: func.func @test_reduce_sum_dims$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?,?,?],f32> -> tensor // CHECK: %[[ARG1_BUILTIN:.*]] = torch.constant.none @@ -239,7 +239,7 @@ func @test_reduce_mean_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.reshape"(%[[SUM]]) {new_shape = [-1, -1, -1]} : (tensor<1x?x?x?xf32>) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?,?],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?,?],f32> -func @test_reduce_sum_dims$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { +func.func @test_reduce_sum_dims$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { %none = torch.constant.none %false = torch.constant.bool false %int0 = torch.constant.int 0 @@ -250,7 +250,7 @@ func @test_reduce_sum_dims$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch // ----- -// CHECK-LABEL: func @test_reduce_sum$basic( +// CHECK-LABEL: func.func @test_reduce_sum$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[1],f32> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?,?,?],f32> -> tensor // CHECK: %[[ARG1_BUILTIN:.*]] = torch.constant.none @@ -261,7 +261,7 @@ func @test_reduce_sum_dims$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.reshape"(%[[REDUCE4]]) {new_shape = [1]} : (tensor<1x1x1x1xf32>) -> tensor<1xf32> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor<1xf32> -> !torch.vtensor<[1],f32> // CHECK: return %[[RESULT]] : !torch.vtensor<[1],f32> -func @test_reduce_sum$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[1],f32> { +func.func @test_reduce_sum$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[1],f32> { %none = torch.constant.none %0 = torch.aten.sum %arg0, %none : !torch.vtensor<[?,?,?,?],f32>, !torch.none -> !torch.vtensor<[1],f32> return %0 : !torch.vtensor<[1],f32> @@ -269,7 +269,7 @@ func @test_reduce_sum$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vten // ----- -// CHECK-LABEL: func @test_reduce_all$basic( +// CHECK-LABEL: func.func @test_reduce_all$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[1],i1> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?,?,?],i1> -> tensor // CHECK: %[[REDUCE1:.*]] = "tosa.reduce_all"(%[[ARG0_BUILTIN]]) {axis = 0 : i64} : (tensor) -> tensor<1x?x?x?xi1> @@ -279,14 +279,14 @@ func @test_reduce_sum$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vten // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.reshape"(%[[REDUCE4]]) {new_shape = [1]} : (tensor<1x1x1x1xi1>) -> tensor<1xi1> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor<1xi1> -> !torch.vtensor<[1],i1> // CHECK: return %[[RESULT]] : !torch.vtensor<[1],i1> -func @test_reduce_all$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[1],i1> { +func.func @test_reduce_all$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[1],i1> { %0 = torch.aten.all %arg0 : !torch.vtensor<[?,?,?,?],i1> -> !torch.vtensor<[1],i1> return %0 : !torch.vtensor<[1],i1> } // ----- -// CHECK-LABEL: func @test_reduce_any_dim$basic( 
+// CHECK-LABEL: func.func @test_reduce_any_dim$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[?,?,?],i1> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?,?,?],i1> -> tensor // CHECK: %[[ARG1:.*]] = torch.constant.int 0 @@ -295,7 +295,7 @@ func @test_reduce_all$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtens // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.reshape"(%[[REDUCE]]) {new_shape = [-1, -1, -1]} : (tensor<1x?x?x?xi1>) -> tensor // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor -> !torch.vtensor<[?,?,?],i1> // CHECK: return %[[RESULT]] : !torch.vtensor<[?,?,?],i1> -func @test_reduce_any_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[?,?,?],i1> { +func.func @test_reduce_any_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[?,?,?],i1> { %int0 = torch.constant.int 0 %false = torch.constant.bool false %0 = torch.aten.any.dim %arg0, %int0, %false : !torch.vtensor<[?,?,?,?],i1>, !torch.int, !torch.bool -> !torch.vtensor<[?,?,?],i1> @@ -304,7 +304,7 @@ func @test_reduce_any_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.v // ----- -// CHECK-LABEL: func @test_reduce_any$basic( +// CHECK-LABEL: func.func @test_reduce_any$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[1],i1> { // CHECK: %[[ARG0_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?,?,?],i1> -> tensor // CHECK: %[[REDUCE1:.*]] = "tosa.reduce_any"(%[[ARG0_BUILTIN]]) {axis = 0 : i64} : (tensor) -> tensor<1x?x?x?xi1> @@ -314,28 +314,28 @@ func @test_reduce_any_dim$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.v // CHECK: %[[RESULT_BUILTIN:.*]] = "tosa.reshape"(%[[REDUCE4]]) {new_shape = [1]} : (tensor<1x1x1x1xi1>) -> tensor<1xi1> // CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor<1xi1> -> !torch.vtensor<[1],i1> // CHECK: return %[[RESULT]] : !torch.vtensor<[1],i1> -func @test_reduce_any$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[1],i1> { +func.func @test_reduce_any$basic(%arg0: !torch.vtensor<[?,?,?,?],i1>) -> !torch.vtensor<[1],i1> { %0 = torch.aten.any %arg0 : !torch.vtensor<[?,?,?,?],i1> -> !torch.vtensor<[1],i1> return %0 : !torch.vtensor<[1],i1> } // ----- -// CHECK-LABEL: func @torch.aten.rsqrt$basic( +// CHECK-LABEL: func.func @torch.aten.rsqrt$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = "tosa.rsqrt"(%[[VAL_1]]) : (tensor) -> tensor // CHECK: %[[VAL_3:.*]] = torch_c.from_builtin_tensor %[[VAL_2]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_3]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.rsqrt$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.rsqrt$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.rsqrt %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.maximum$basic( +// CHECK-LABEL: func.func @torch.aten.maximum$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ 
-344,14 +344,14 @@ func @torch.aten.rsqrt$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.maximum$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.maximum$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.maximum %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.minimum$basic( +// CHECK-LABEL: func.func @torch.aten.minimum$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -360,14 +360,14 @@ func @torch.aten.maximum$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.v // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.minimum$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.minimum$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.minimum %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.pow.Tensor_Scalar$basic( +// CHECK-LABEL: func.func @torch.aten.pow.Tensor_Scalar$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = torch.constant.float 3.123400e+00 @@ -376,7 +376,7 @@ func @torch.aten.minimum$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.v // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.pow.Tensor_Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.pow.Tensor_Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %fp0 = torch.constant.float 3.123400e+00 %0 = torch.aten.pow.Tensor_Scalar %arg0, %fp0 : !torch.vtensor<[?,?],f32>, !torch.float -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> @@ -384,7 +384,7 @@ func @torch.aten.pow.Tensor_Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !t // ----- -// CHECK-LABEL: func @torch.aten.rsub.Scalar$basic( +// CHECK-LABEL: func.func @torch.aten.rsub.Scalar$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = torch.constant.float 3.123400e+00 @@ -396,7 +396,7 @@ func @torch.aten.pow.Tensor_Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !t // CHECK: %[[VAL_8:.*]] = torch_c.from_builtin_tensor %[[VAL_7]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_8]] : 
!torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %other = torch.constant.float 3.123400e+00 %alpha = torch.constant.float 6.432100e+00 %0 = torch.aten.rsub.Scalar %arg0, %other, %alpha : !torch.vtensor<[?,?],f32>, !torch.float, !torch.float -> !torch.vtensor<[?,?],f32> @@ -405,7 +405,7 @@ func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.v // ----- -// CHECK-LABEL: func @torch.aten.rsub.Scalar$basic( +// CHECK-LABEL: func.func @torch.aten.rsub.Scalar$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = torch.constant.float 3.123400e+00 @@ -417,7 +417,7 @@ func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.v // CHECK: %[[VAL_8:.*]] = torch_c.from_builtin_tensor %[[VAL_7]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_8]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %other = torch.constant.float 3.123400e+00 %alpha = torch.constant.int 1 %0 = torch.aten.rsub.Scalar %arg0, %other, %alpha : !torch.vtensor<[?,?],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,?],f32> @@ -426,7 +426,7 @@ func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.v // ----- -// CHECK-LABEL: func @torch.aten.gt.Tensor$basic( +// CHECK-LABEL: func.func @torch.aten.gt.Tensor$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -435,14 +435,14 @@ func @torch.aten.rsub.Scalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.v // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],i1> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],i1> // CHECK: } -func @torch.aten.gt.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { +func.func @torch.aten.gt.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { %0 = torch.aten.gt.Tensor %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],i1> return %0 : !torch.vtensor<[?,?],i1> } // ----- -// CHECK-LABEL: func @torch.aten.lt.Tensor$basic( +// CHECK-LABEL: func.func @torch.aten.lt.Tensor$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -451,14 +451,14 @@ func @torch.aten.gt.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],i1> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],i1> // CHECK: } -func @torch.aten.lt.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { 
+func.func @torch.aten.lt.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { %0 = torch.aten.lt.Tensor %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],i1> return %0 : !torch.vtensor<[?,?],i1> } // ----- -// CHECK-LABEL: func @torch.aten.eq.Tensor$basic( +// CHECK-LABEL: func.func @torch.aten.eq.Tensor$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -467,14 +467,14 @@ func @torch.aten.lt.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],i1> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],i1> // CHECK: } -func @torch.aten.eq.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { +func.func @torch.aten.eq.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { %0 = torch.aten.eq.Tensor %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],i1> return %0 : !torch.vtensor<[?,?],i1> } // ----- -// CHECK-LABEL: func @torch.aten.reshape$basic( +// CHECK-LABEL: func.func @torch.aten.reshape$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?,?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = torch.constant.int -1 @@ -483,7 +483,7 @@ func @torch.aten.eq.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?],f32> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?],f32> // CHECK: } -func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?],f32> { +func.func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?],f32> { %dim0 = torch.constant.int -1 %shape = torch.prim.ListConstruct %dim0 : (!torch.int) -> !torch.list %0 = torch.aten.reshape %arg0, %shape : !torch.vtensor<[?,?,?,?],f32>, !torch.list -> !torch.vtensor<[?],f32> @@ -492,7 +492,7 @@ func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.v // ----- -// CHECK-LABEL: func @torch.aten.native_batch_norm$basic( +// CHECK-LABEL: func.func @torch.aten.native_batch_norm$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[10,4,3],f32>) -> !torch.vtensor<[10,4,3],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[10,4,3],f32> -> tensor<10x4x3xf32> // CHECK: %[[VAL_2:.*]] = "tosa.const"() {value = dense<[5.000000e-01, 4.000000e-01, 3.000000e-01, 6.000000e-01]> : tensor<4xf32>} : () -> tensor<4xf32> @@ -515,7 +515,7 @@ func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.v // CHECK: %[[VAL_19:.*]] = torch_c.from_builtin_tensor %[[VAL_18]] : tensor<10x4x3xf32> -> !torch.vtensor<[10,4,3],f32> // CHECK: return %[[VAL_19]] : !torch.vtensor<[10,4,3],f32> // CHECK: } -func @torch.aten.native_batch_norm$basic(%arg0: !torch.vtensor<[10,4,3],f32> ) -> !torch.vtensor<[10,4,3],f32> { +func.func @torch.aten.native_batch_norm$basic(%arg0: !torch.vtensor<[10,4,3],f32> ) -> !torch.vtensor<[10,4,3],f32> { %0 = 
torch.vtensor.literal(dense<[5.000000e-01, 4.000000e-01, 3.000000e-01, 6.000000e-01]> : tensor<4xf32>) : !torch.vtensor<[4],f32> %1 = torch.vtensor.literal(dense<[3.000000e+00, 2.000000e+00, 4.000000e+00, 5.000000e+00]> : tensor<4xf32>) : !torch.vtensor<[4],f32> %float1.000000e-01 = torch.constant.float 1.000000e-01 @@ -528,7 +528,7 @@ func @torch.aten.native_batch_norm$basic(%arg0: !torch.vtensor<[10,4,3],f32> ) - // ----- -// CHECK-LABEL: func @forward( +// CHECK-LABEL: func.func @forward( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[10,3,8,9,3,4],f32>) -> !torch.vtensor<[10,3,?,4],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[10,3,8,9,3,4],f32> -> tensor<10x3x8x9x3x4xf32> // CHECK: %[[VAL_2:.*]] = torch.constant.int 4 @@ -538,7 +538,7 @@ func @torch.aten.native_batch_norm$basic(%arg0: !torch.vtensor<[10,4,3],f32> ) - // CHECK: %[[VAL_6:.*]] = torch_c.from_builtin_tensor %[[VAL_5]] : tensor<10x3x?x4xf32> -> !torch.vtensor<[10,3,?,4],f32> // CHECK: return %[[VAL_6]] : !torch.vtensor<[10,3,?,4],f32> // CHECK: } -func @forward(%arg0: !torch.vtensor<[10,3,8,9,3,4],f32> ) -> !torch.vtensor<[10,3,?,4],f32> { +func.func @forward(%arg0: !torch.vtensor<[10,3,8,9,3,4],f32> ) -> !torch.vtensor<[10,3,?,4],f32> { %int4 = torch.constant.int 4 %int2 = torch.constant.int 2 %0 = torch.aten.flatten.using_ints %arg0, %int2, %int4 : !torch.vtensor<[10,3,8,9,3,4],f32>, !torch.int, !torch.int -> !torch.vtensor<[10,3,?,4],f32> @@ -547,7 +547,7 @@ func @forward(%arg0: !torch.vtensor<[10,3,8,9,3,4],f32> ) -> !torch.vtensor<[10, // ----- -// CHECK-LABEL: func @forward( +// CHECK-LABEL: func.func @forward( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[5,2,2,3],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[2,2,3],f32>, // CHECK-SAME: %[[VAL_2:.*]]: !torch.vtensor<[2,2,3],f32>) -> !torch.vtensor<[5,2,2,3],f32> { @@ -584,7 +584,7 @@ func @forward(%arg0: !torch.vtensor<[10,3,8,9,3,4],f32> ) -> !torch.vtensor<[10, // CHECK: %[[VAL_33:.*]] = torch_c.from_builtin_tensor %[[VAL_32]] : tensor<5x2x2x3xf32> -> !torch.vtensor<[5,2,2,3],f32> // CHECK: return %[[VAL_33]] : !torch.vtensor<[5,2,2,3],f32> // CHECK: } -func @forward(%arg0: !torch.vtensor<[5,2,2,3],f32> , %arg1: !torch.vtensor<[2,2,3],f32> , %arg2: !torch.vtensor<[2,2,3],f32> ) -> !torch.vtensor<[5,2,2,3],f32> { +func.func @forward(%arg0: !torch.vtensor<[5,2,2,3],f32> , %arg1: !torch.vtensor<[2,2,3],f32> , %arg2: !torch.vtensor<[2,2,3],f32> ) -> !torch.vtensor<[5,2,2,3],f32> { %float5.000000e-01 = torch.constant.float 5.000000e-01 %int3 = torch.constant.int 3 %int2 = torch.constant.int 2 @@ -595,7 +595,7 @@ func @forward(%arg0: !torch.vtensor<[5,2,2,3],f32> , %arg1: !torch.vtensor<[2,2, // ----- -// CHECK-LABEL: func @torch.aten.ne.Tensor$basic( +// CHECK-LABEL: func.func @torch.aten.ne.Tensor$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor @@ -605,14 +605,14 @@ func @forward(%arg0: !torch.vtensor<[5,2,2,3],f32> , %arg1: !torch.vtensor<[2,2, // CHECK: %[[VAL_6:.*]] = torch_c.from_builtin_tensor %[[VAL_5]] : tensor -> !torch.vtensor<[?,?],i1> // CHECK: return %[[VAL_6]] : !torch.vtensor<[?,?],i1> // CHECK: } -func @torch.aten.ne.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { +func.func @torch.aten.ne.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: 
!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],i1> { %0 = torch.aten.ne.Tensor %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],i1> return %0 : !torch.vtensor<[?,?],i1> } // ----- -// CHECK-LABEL: func @forward( +// CHECK-LABEL: func.func @forward( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[3,4,2],f32>) -> !torch.vtensor<[3,2,4],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[3,4,2],f32> -> tensor<3x4x2xf32> // CHECK: %[[VAL_2:.*]] = torch.constant.int 1 @@ -624,7 +624,7 @@ func @torch.aten.ne.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch // CHECK: %[[VAL_8:.*]] = torch_c.from_builtin_tensor %[[VAL_7]] : tensor<3x2x4xf32> -> !torch.vtensor<[3,2,4],f32> // CHECK: return %[[VAL_8]] : !torch.vtensor<[3,2,4],f32> // CHECK: } -func @forward(%arg0: !torch.vtensor<[3,4,2],f32> ) -> !torch.vtensor<[3,2,4],f32> { +func.func @forward(%arg0: !torch.vtensor<[3,4,2],f32> ) -> !torch.vtensor<[3,2,4],f32> { %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 %int0 = torch.constant.int 0 @@ -635,7 +635,7 @@ func @forward(%arg0: !torch.vtensor<[3,4,2],f32> ) -> !torch.vtensor<[3,2,4],f32 // ----- -// CHECK-LABEL: func @torch.aten.bitwise_and.Tensor$basic( +// CHECK-LABEL: func.func @torch.aten.bitwise_and.Tensor$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],si32>, // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],si32>) -> !torch.vtensor<[?,?],si32> { // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],si32> -> tensor @@ -644,14 +644,14 @@ func @forward(%arg0: !torch.vtensor<[3,4,2],f32> ) -> !torch.vtensor<[3,2,4],f32 // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],si32> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],si32> // CHECK: } -func @torch.aten.bitwise_and.Tensor$basic(%arg0: !torch.vtensor<[?,?],si32>, %arg1: !torch.vtensor<[?,?],si32>) -> !torch.vtensor<[?,?],si32> { +func.func @torch.aten.bitwise_and.Tensor$basic(%arg0: !torch.vtensor<[?,?],si32>, %arg1: !torch.vtensor<[?,?],si32>) -> !torch.vtensor<[?,?],si32> { %0 = torch.aten.bitwise_and.Tensor %arg0, %arg1 : !torch.vtensor<[?,?],si32>, !torch.vtensor<[?,?],si32> -> !torch.vtensor<[?,?],si32> return %0 : !torch.vtensor<[?,?],si32> } // ----- -// CHECK-LABEL: func @torch.aten.log2$basic( +// CHECK-LABEL: func.func @torch.aten.log2$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = "tosa.const"() {value = dense<0.693147182> : tensor<1x1xf32>} : () -> tensor<1x1xf32> @@ -661,14 +661,14 @@ func @torch.aten.bitwise_and.Tensor$basic(%arg0: !torch.vtensor<[?,?],si32>, %ar // CHECK: %[[VAL_6:.*]] = torch_c.from_builtin_tensor %[[VAL_5]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_6]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.log2$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.log2$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.log2 %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.zeros$basic() -> !torch.vtensor<[3,4],f32> { +// CHECK-LABEL: func.func @torch.aten.zeros$basic() -> !torch.vtensor<[3,4],f32> { // CHECK: %[[VAL_0:.*]] = torch.constant.int 4 // 
CHECK: %[[VAL_1:.*]] = torch.constant.int 3 // CHECK: %[[VAL_2:.*]] = torch.constant.none @@ -678,7 +678,7 @@ func @torch.aten.log2$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor // CHECK: %[[VAL_6:.*]] = torch_c.from_builtin_tensor %[[VAL_5]] : tensor<3x4xf32> -> !torch.vtensor<[3,4],f32> // CHECK: return %[[VAL_6]] : !torch.vtensor<[3,4],f32> // CHECK: } -func @torch.aten.zeros$basic() -> !torch.vtensor<[3,4],f32> { +func.func @torch.aten.zeros$basic() -> !torch.vtensor<[3,4],f32> { %int4 = torch.constant.int 4 %int3 = torch.constant.int 3 %none = torch.constant.none @@ -689,7 +689,7 @@ func @torch.aten.zeros$basic() -> !torch.vtensor<[3,4],f32> { // ----- -// CHECK-LABEL: func @torch.aten.unsqueeze$basic( +// CHECK-LABEL: func.func @torch.aten.unsqueeze$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[4,3],si32>) -> !torch.vtensor<[4,1,3],si32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[4,3],si32> -> tensor<4x3xi32> // CHECK: %[[VAL_2:.*]] = torch.constant.int 1 @@ -698,7 +698,7 @@ func @torch.aten.zeros$basic() -> !torch.vtensor<[3,4],f32> { // CHECK: return %[[VAL_4]] : !torch.vtensor<[4,1,3],si32> // CHECK: } -func @torch.aten.unsqueeze$basic(%arg0: !torch.vtensor<[4,3],si32> ) -> !torch.vtensor<[4,1,3],si32> { +func.func @torch.aten.unsqueeze$basic(%arg0: !torch.vtensor<[4,3],si32> ) -> !torch.vtensor<[4,1,3],si32> { %int1 = torch.constant.int 1 %0 = torch.aten.unsqueeze %arg0, %int1 : !torch.vtensor<[4,3],si32>, !torch.int -> !torch.vtensor<[4,1,3],si32> return %0 : !torch.vtensor<[4,1,3],si32> @@ -706,14 +706,14 @@ func @torch.aten.unsqueeze$basic(%arg0: !torch.vtensor<[4,3],si32> ) -> !torch.v // ----- -// CHECK-LABEL: func @torch.aten.contiguous$basic( +// CHECK-LABEL: func.func @torch.aten.contiguous$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = torch.constant.int 0 // CHECK: %[[VAL_3:.*]] = torch_c.from_builtin_tensor %[[VAL_1]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_3]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.contiguous$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.contiguous$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> { %int0 = torch.constant.int 0 %0 = torch.aten.contiguous %arg0, %int0 : !torch.vtensor<[?,?],f32>, !torch.int -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> @@ -721,7 +721,7 @@ func @torch.aten.contiguous$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.v // ----- -// CHECK-LABEL: func @torch.aten.ones$basic() -> !torch.vtensor<[3,4],f32> { +// CHECK-LABEL: func.func @torch.aten.ones$basic() -> !torch.vtensor<[3,4],f32> { // CHECK: %[[VAL_0:.*]] = torch.constant.int 4 // CHECK: %[[VAL_1:.*]] = torch.constant.int 3 // CHECK: %[[VAL_2:.*]] = torch.constant.none @@ -731,7 +731,7 @@ func @torch.aten.contiguous$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.v // CHECK: %[[VAL_6:.*]] = torch_c.from_builtin_tensor %[[VAL_5]] : tensor<3x4xf32> -> !torch.vtensor<[3,4],f32> // CHECK: return %[[VAL_6]] : !torch.vtensor<[3,4],f32> // CHECK: } -func @torch.aten.ones$basic() -> !torch.vtensor<[3,4],f32> { +func.func @torch.aten.ones$basic() -> !torch.vtensor<[3,4],f32> { %int4 = torch.constant.int 4 %int3 = torch.constant.int 3 %none = torch.constant.none @@ -742,7 +742,7 @@ func @torch.aten.ones$basic() -> 
!torch.vtensor<[3,4],f32> { // ----- -// CHECK-LABEL: func @torch.aten.dropout$basic( +// CHECK-LABEL: func.func @torch.aten.dropout$basic( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor // CHECK: %[[VAL_2:.*]] = torch.constant.float 0.000000e+00 @@ -751,7 +751,7 @@ func @torch.aten.ones$basic() -> !torch.vtensor<[3,4],f32> { // CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor -> !torch.vtensor<[?,?],f32> // CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.dropout$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.dropout$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> { %float0.000000e00 = torch.constant.float 0.000000e+00 %false = torch.constant.bool false %0 = torch.aten.dropout %arg0, %float0.000000e00, %false : !torch.vtensor<[?,?],f32>, !torch.float, !torch.bool -> !torch.vtensor<[?,?],f32> diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/free-functions.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/free-functions.mlir index 97e48d9bc..51f379bcb 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/free-functions.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/free-functions.mlir @@ -4,31 +4,31 @@ torch.class_type @c { torch.attr "float" : !torch.float torch.method "calls_free_function", @calls_free_function } -// CHECK-LABEL: func private +// CHECK-LABEL: func.func private // CHECK-SAME: @free_function$[[$MONOMORPHIZE_TAG0:.*]]( // CHECK-SAME: %[[F:.*]]: !torch.float) -> !torch.float { // CHECK: return %[[F]] : !torch.float // CHECK: } -func private @free_function(%arg0: !torch.float, %arg1: !torch.nn.Module<"c">) -> !torch.float { +func.func private @free_function(%arg0: !torch.float, %arg1: !torch.nn.Module<"c">) -> !torch.float { return %arg0 : !torch.float } -// CHECK-LABEL: func private +// CHECK-LABEL: func.func private // CHECK-SAME: @free_function_no_module_args$[[$MONOMORPHIZE_TAG1:.*]]( // CHECK-SAME: %[[F:.*]]: !torch.float) -> !torch.float { // CHECK: return %[[F]] : !torch.float // CHECK: } -func private @free_function_no_module_args(%arg0: !torch.float) -> !torch.float { +func.func private @free_function_no_module_args(%arg0: !torch.float) -> !torch.float { return %arg0 : !torch.float } -// CHECK-LABEL: func @calls_free_function() -> !torch.float { +// CHECK-LABEL: func.func @calls_free_function() -> !torch.float { // CHECK: %[[F1:.*]] = torch.global_slot.get @float : !torch.float // CHECK: %[[F2:.*]] = call @free_function$[[$MONOMORPHIZE_TAG0]](%[[F1]]) : (!torch.float) -> !torch.float // CHECK: %[[RET:.*]] = call @free_function_no_module_args$[[$MONOMORPHIZE_TAG1]](%[[F2]]) : (!torch.float) -> !torch.float // CHECK: return %[[RET]] : !torch.float // CHECK: } -func private @calls_free_function(%arg0: !torch.nn.Module<"c">) -> !torch.float { +func.func private @calls_free_function(%arg0: !torch.nn.Module<"c">) -> !torch.float { %0 = torch.prim.GetAttr %arg0["float"] : !torch.nn.Module<"c"> -> !torch.float %1 = call @free_function(%0, %arg0) : (!torch.float, !torch.nn.Module<"c">) -> !torch.float %2 = call @free_function_no_module_args(%1) : (!torch.float) -> !torch.float diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/methods.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/methods.mlir index e25d8b545..c27706406 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/methods.mlir +++ 
b/test/Dialect/Torch/GlobalizeObjectGraph/methods.mlir @@ -7,28 +7,28 @@ torch.class_type @c { torch.method "test_call", @test_call } -// CHECK-LABEL: func @test_get() -> !torch.float { +// CHECK-LABEL: func.func @test_get() -> !torch.float { // CHECK: %[[V:.*]] = torch.global_slot.get @float : !torch.float // CHECK: return %[[V]] : !torch.float -func private @test_get(%arg0: !torch.nn.Module<"c">) -> !torch.float { +func.func private @test_get(%arg0: !torch.nn.Module<"c">) -> !torch.float { %0 = torch.prim.GetAttr %arg0["float"] : !torch.nn.Module<"c"> -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @test_set( +// CHECK-LABEL: func.func @test_set( // CHECK-SAME: %[[A:.*]]: !torch.float) { // CHECK: torch.global_slot.set @float = %[[A]] : !torch.float // CHECK: return -func private @test_set(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) { +func.func private @test_set(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) { torch.prim.SetAttr %arg0["float"] = %arg1 : !torch.nn.Module<"c">, !torch.float return } -// CHECK-LABEL: func @test_call( +// CHECK-LABEL: func.func @test_call( // CHECK-SAME: %[[A:.*]]: !torch.float) -> !torch.float { // CHECK: %[[V:.*]] = call @test_call(%[[A]]) : (!torch.float) -> !torch.float // CHECK: return %[[V]] : !torch.float -func private @test_call(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) -> !torch.float { +func.func private @test_call(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) -> !torch.float { %0 = call @test_call(%arg0, %arg1) : (!torch.nn.Module<"c">, !torch.float) -> !torch.float return %0 : !torch.float } diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/module-uses-error.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/module-uses-error.mlir index c3a4199f0..4b8bed8db 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/module-uses-error.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/module-uses-error.mlir @@ -4,7 +4,7 @@ torch.class_type @parent { torch.method "module_type_return", @module_type_return } -func private @module_type_return(%arg0: !torch.nn.Module<"parent">) { +func.func private @module_type_return(%arg0: !torch.nn.Module<"parent">) { // expected-error @+1 {{unsupported use of a torch.nn.Module. 
Expected only method calls or attribute get/set}} torch.prim.ListConstruct %arg0 : (!torch.nn.Module<"parent">) -> !torch.list> return diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/module-uses.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/module-uses.mlir index 4d1a04d02..7fef78678 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/module-uses.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/module-uses.mlir @@ -10,8 +10,8 @@ torch.class_type @parent { torch.method "method_call", @method_call } -// CHECK-LABEL: func @get_attr_returns_module_type() -> !torch.float { -func private @get_attr_returns_module_type(%arg0: !torch.nn.Module<"parent">) -> !torch.float { +// CHECK-LABEL: func.func @get_attr_returns_module_type() -> !torch.float { +func.func private @get_attr_returns_module_type(%arg0: !torch.nn.Module<"parent">) -> !torch.float { %0 = torch.prim.GetAttr %arg0["m"] : !torch.nn.Module<"parent"> -> !torch.nn.Module<"child"> // CHECK-NEXT: %[[V:.*]] = torch.global_slot.get @m.float : !torch.float %1 = torch.prim.GetAttr %0["float"] : !torch.nn.Module<"child"> -> !torch.float @@ -21,15 +21,15 @@ func private @get_attr_returns_module_type(%arg0: !torch.nn.Module<"parent">) -> return %1 : !torch.float } -// CHECK-LABEL: func @module_type_argument( +// CHECK-LABEL: func.func @module_type_argument( // CHECK-SAME: %[[F:.*]]: !torch.float) -> !torch.none { -func private @module_type_argument(%arg0: !torch.nn.Module<"parent">, %arg1: !torch.nn.Module<"parent">, %arg2: !torch.float, %arg3: !torch.nn.Module<"parent">) -> !torch.none { +func.func private @module_type_argument(%arg0: !torch.nn.Module<"parent">, %arg1: !torch.nn.Module<"parent">, %arg2: !torch.float, %arg3: !torch.nn.Module<"parent">) -> !torch.none { %0 = torch.constant.none return %0 : !torch.none } -// CHECK-LABEL: func @method_call() -> !torch.none { -func private @method_call(%arg0: !torch.nn.Module<"parent">) -> !torch.none { +// CHECK-LABEL: func.func @method_call() -> !torch.none { +func.func private @method_call(%arg0: !torch.nn.Module<"parent">) -> !torch.none { // CHECK-NEXT: %[[C:.*]] = torch.constant.float 4.300000e+01 %c = torch.constant.float 43.0 // CHECK-NEXT: %[[F:.*]] = call @module_type_argument(%[[C]]) : (!torch.float) -> !torch.none diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-error.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-error.mlir index 4e26f93be..b270ea258 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-error.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-error.mlir @@ -1,9 +1,9 @@ // RUN: torch-mlir-opt -torch-globalize-object-graph -verify-diagnostics -split-input-file %s -func private @__torch__.Submodule.forward(%arg0: !torch.nn.Module<"__torch__.Submodule">, %arg1: !torch.nn.Module<"__torch__.Submodule">) { +func.func private @__torch__.Submodule.forward(%arg0: !torch.nn.Module<"__torch__.Submodule">, %arg1: !torch.nn.Module<"__torch__.Submodule">) { return } -func private @__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.TestModule">) { +func.func private @__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.TestModule">) { %5 = torch.prim.GetAttr %arg0["s1"] : !torch.nn.Module<"__torch__.TestModule"> -> !torch.nn.Module<"__torch__.Submodule"> %6 = torch.prim.GetAttr %arg0["s2"] : !torch.nn.Module<"__torch__.TestModule"> -> !torch.nn.Module<"__torch__.Submodule"> call @__torch__.Submodule.forward(%5, %6) : (!torch.nn.Module<"__torch__.Submodule">, 
!torch.nn.Module<"__torch__.Submodule">) -> () diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-multiple-module-args.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-multiple-module-args.mlir index a0a0f0488..f4f84730d 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-multiple-module-args.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances-multiple-module-args.mlir @@ -34,10 +34,10 @@ torch.class_type @__torch__.Submodule { } : !torch.nn.Module<"__torch__.TestModule"> -// CHECK-LABEL: func @forward() { +// CHECK-LABEL: func.func @forward() { // CHECK: call @__torch__.free_function$[[$MONOMORPHIZE_TAG0:.*]]() : () -> () // CHECK: call @__torch__.free_function$[[$MONOMORPHIZE_TAG1:.*]]() : () -> () -func private @__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.TestModule">) { +func.func private @__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.TestModule">) { %4 = torch.prim.GetAttr %arg0["s1"] : !torch.nn.Module<"__torch__.TestModule"> -> !torch.nn.Module<"__torch__.Submodule"> %5 = torch.prim.GetAttr %arg0["s2"] : !torch.nn.Module<"__torch__.TestModule"> -> !torch.nn.Module<"__torch__.Submodule"> call @__torch__.free_function(%4, %5) : (!torch.nn.Module<"__torch__.Submodule">, !torch.nn.Module<"__torch__.Submodule">) -> () @@ -48,27 +48,27 @@ func private @__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.Te } // s1 called first, then s2 -// CHECK-LABEL: func private +// CHECK-LABEL: func.func private // CHECK-SAME @__torch__.free_function$[[$MONOMORPHIZE_TAG0]]() { // CHECK: call @s1.forward() : () -> () // CHECK: call @s2.forward() : () -> () // s2 called first, then s1 -// CHECK-LABEL: func private +// CHECK-LABEL: func.func private // CHECK-SAME: @__torch__.free_function$[[$MONOMORPHIZE_TAG1]]() { // CHECK: call @s2.forward() : () -> () // CHECK: call @s1.forward() : () -> () -func private @__torch__.free_function(%arg0: !torch.nn.Module<"__torch__.Submodule">, %arg1: !torch.nn.Module<"__torch__.Submodule">) { +func.func private @__torch__.free_function(%arg0: !torch.nn.Module<"__torch__.Submodule">, %arg1: !torch.nn.Module<"__torch__.Submodule">) { call @__torch__.Submodule.forward(%arg0) : (!torch.nn.Module<"__torch__.Submodule">) -> () call @__torch__.Submodule.forward(%arg1) : (!torch.nn.Module<"__torch__.Submodule">) -> () return } -// CHECK-LABEL: func private @s2.forward() { +// CHECK-LABEL: func.func private @s2.forward() { // CHECK: return -// CHECK-LABEL: func private @s1.forward() { +// CHECK-LABEL: func.func private @s1.forward() { // CHECK: return -func private @__torch__.Submodule.forward(%arg0: !torch.nn.Module<"__torch__.Submodule">) { +func.func private @__torch__.Submodule.forward(%arg0: !torch.nn.Module<"__torch__.Submodule">) { return } diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances.mlir index 2d021dbf3..149e4e528 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/multiple-instances.mlir @@ -32,31 +32,31 @@ torch.class_type @__torch__.Submodule { } : !torch.nn.Module<"__torch__.TestModule"> -// CHECK-LABEL: func @forward() { +// CHECK-LABEL: func.func @forward() { // CHECK: call @s1.forward() : () -> () // CHECK: call @s2.forward() : () -> () // CHECK: return -func private @__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.TestModule">) { +func.func private 
@__torch__.TestModule.forward(%arg0: !torch.nn.Module<"__torch__.TestModule">) { %4 = torch.prim.GetAttr %arg0["s1"] : !torch.nn.Module<"__torch__.TestModule"> -> !torch.nn.Module<"__torch__.Submodule"> %5 = torch.prim.GetAttr %arg0["s2"] : !torch.nn.Module<"__torch__.TestModule"> -> !torch.nn.Module<"__torch__.Submodule"> call @__torch__.Submodule.forward(%4) : (!torch.nn.Module<"__torch__.Submodule">) -> () call @__torch__.Submodule.forward(%5) : (!torch.nn.Module<"__torch__.Submodule">) -> () return } -// CHECK-LABEL: func private @s1.forward() { +// CHECK-LABEL: func.func private @s1.forward() { // CHECK: %[[C3:.*]] = torch.constant.int 3 // CHECK: %[[N:.*]] = torch.global_slot.get @s1.n : !torch.int // CHECK: %[[NEWVAL:.*]] = torch.aten.add.int %[[N]], %[[C3]] : !torch.int, !torch.int -> !torch.int // CHECK: torch.global_slot.set @s1.n = %[[NEWVAL]] : !torch.int // CHECK: return -// CHECK-LABEL: func private @s2.forward() { +// CHECK-LABEL: func.func private @s2.forward() { // CHECK: %[[C3:.*]] = torch.constant.int 3 // CHECK: %[[N:.*]] = torch.global_slot.get @s2.n : !torch.int // CHECK: %[[NEWVAL:.*]] = torch.aten.add.int %[[N]], %[[C3]] : !torch.int, !torch.int -> !torch.int // CHECK: torch.global_slot.set @s2.n = %[[NEWVAL]] : !torch.int // CHECK: return -func private @__torch__.Submodule.forward(%arg0: !torch.nn.Module<"__torch__.Submodule">) { +func.func private @__torch__.Submodule.forward(%arg0: !torch.nn.Module<"__torch__.Submodule">) { %int3 = torch.constant.int 3 %5 = torch.prim.GetAttr %arg0["n"] : !torch.nn.Module<"__torch__.Submodule"> -> !torch.int %6 = torch.aten.add.int %5, %int3 : !torch.int, !torch.int -> !torch.int diff --git a/test/Dialect/Torch/GlobalizeObjectGraph/visibility.mlir b/test/Dialect/Torch/GlobalizeObjectGraph/visibility.mlir index d0b4defef..8ad6e5f48 100644 --- a/test/Dialect/Torch/GlobalizeObjectGraph/visibility.mlir +++ b/test/Dialect/Torch/GlobalizeObjectGraph/visibility.mlir @@ -6,8 +6,8 @@ torch.class_type @c { torch.method private "forward", @method } -// CHECK: func private @forward() { -func private @method(%arg0: !torch.nn.Module<"c">) { +// CHECK: func.func private @forward() { +func.func private @method(%arg0: !torch.nn.Module<"c">) { return } diff --git a/test/Dialect/Torch/adjust-calling-conventions.mlir b/test/Dialect/Torch/adjust-calling-conventions.mlir index b5c4a7fa0..5ca041b3b 100644 --- a/test/Dialect/Torch/adjust-calling-conventions.mlir +++ b/test/Dialect/Torch/adjust-calling-conventions.mlir @@ -1,22 +1,22 @@ // RUN: torch-mlir-opt -torch-adjust-calling-conventions -allow-unregistered-dialect -split-input-file %s | FileCheck %s -// CHECK-LABEL: func @basic( +// CHECK-LABEL: func.func @basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor { // CHECK: %[[ERASED:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.vtensor<[2,3,?],f32> to !torch.vtensor // CHECK: %[[NONVAL_TENSOR:.*]] = torch.copy.to_tensor %[[ERASED]] : !torch.tensor // CHECK: return %[[NONVAL_TENSOR]] : !torch.tensor -func @basic(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[2,3,?],f32>}) -> !torch.tensor { +func.func @basic(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[2,3,?],f32>}) -> !torch.tensor { return %arg0 : !torch.tensor } -// CHECK-LABEL: func @no_type_bound( +// CHECK-LABEL: func.func @no_type_bound( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: return %[[ARG]] : !torch.tensor -func @no_type_bound(%arg0: !torch.tensor) -> !torch.tensor { +func.func @no_type_bound(%arg0: 
!torch.tensor) -> !torch.tensor { return %arg0 : !torch.tensor } -// CHECK-LABEL: func @call( +// CHECK-LABEL: func.func @call( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor { // CHECK: %[[ARG_ERASED:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.vtensor<[2,3,?],f32> to !torch.vtensor // CHECK: %[[ARG_NONVAL:.*]] = torch.copy.to_tensor %[[ARG_ERASED]] : !torch.tensor @@ -24,31 +24,31 @@ func @no_type_bound(%arg0: !torch.tensor) -> !torch.tensor { // CHECK: %[[CALL_ARG:.*]] = torch.copy.to_vtensor %[[INFO_ADDED]] : !torch.vtensor<[2,3,?],f32> // CHECK: %[[CALL_RES:.*]] = call @call(%[[CALL_ARG]]) : (!torch.vtensor<[2,3,?],f32>) -> !torch.tensor // CHECK: return %[[ARG_NONVAL]] : !torch.tensor -func @call(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[2,3,?],f32>}) -> !torch.tensor { +func.func @call(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[2,3,?],f32>}) -> !torch.tensor { %0 = call @call(%arg0) : (!torch.tensor) -> !torch.tensor return %arg0 : !torch.tensor } -// CHECK-LABEL: func @none_return() { +// CHECK-LABEL: func.func @none_return() { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: return -func @none_return() -> !torch.none { +func.func @none_return() -> !torch.none { %1 = torch.constant.none return %1 : !torch.none } -// CHECK-LABEL: func @none_call_return() { +// CHECK-LABEL: func.func @none_call_return() { // CHECK: call @none_return() : () -> () // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: "test.use"(%[[NONE]]) : (!torch.none) -> () // CHECK: return -func @none_call_return() { +func.func @none_call_return() { %0 = call @none_return() : () -> !torch.none "test.use"(%0) : (!torch.none) -> () return } -// CHECK-LABEL: func @tuple_return( +// CHECK-LABEL: func.func @tuple_return( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?],f32>) -> (!torch.tensor, !torch.tensor) { // CHECK: %[[ARG0_ERASED:.*]] = torch.tensor_static_info_cast %[[ARG0]] : !torch.vtensor<[?],f32> to !torch.vtensor @@ -64,13 +64,13 @@ func @none_call_return() { // CHECK: %[[RET1:.*]] = torch.prim.TupleIndex %[[TUPLE]], %[[CST1]] : // CHECK-SAME: !torch.tuple, !torch.int -> !torch.tensor // CHECK: return %[[RET0]], %[[RET1]] : !torch.tensor, !torch.tensor -func @tuple_return(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[?],f32>}, +func.func @tuple_return(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[?],f32>}, %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[?],f32>}) -> !torch.tuple { %1 = torch.prim.TupleConstruct %arg0, %arg1 : !torch.tensor, !torch.tensor -> !torch.tuple return %1 : !torch.tuple } -// CHECK-LABEL: func @call_tuple_return( +// CHECK-LABEL: func.func @call_tuple_return( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?],f32>) -> (!torch.tensor, !torch.tensor) { // CHECK: %[[ARG0_ERASED:.*]] = torch.tensor_static_info_cast %[[ARG0]] : !torch.vtensor<[?],f32> to !torch.vtensor @@ -92,7 +92,7 @@ func @tuple_return(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[?],f // CHECK: %[[RET1:.*]] = torch.prim.TupleIndex %[[TUPLE]], %[[CST1]] : // CHECK-SAME: !torch.tuple, !torch.int -> !torch.tensor // CHECK: return %[[RET0]], %[[RET1]] : !torch.tensor, !torch.tensor -func @call_tuple_return(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[?],f32>}, +func.func @call_tuple_return(%arg0: !torch.tensor {torch.type_bound = !torch.vtensor<[?],f32>}, %arg1: !torch.tensor {torch.type_bound = 
!torch.vtensor<[?],f32>}) -> !torch.tuple { %0 = call @tuple_return(%arg0, %arg1) : (!torch.tensor, !torch.tensor) -> !torch.tuple return %0 : !torch.tuple diff --git a/test/Dialect/Torch/canonicalize.mlir b/test/Dialect/Torch/canonicalize.mlir index 7bfcb0f52..575aae9d5 100644 --- a/test/Dialect/Torch/canonicalize.mlir +++ b/test/Dialect/Torch/canonicalize.mlir @@ -1,13 +1,13 @@ // RUN: torch-mlir-opt %s -canonicalize | FileCheck %s -// CHECK-LABEL: func @torch.aten.__range_length$fold() -> (!torch.int, !torch.int, !torch.int, !torch.int) { +// CHECK-LABEL: func.func @torch.aten.__range_length$fold() -> (!torch.int, !torch.int, !torch.int, !torch.int) { // CHECK: %[[INT1:.*]] = torch.constant.int 1 // CHECK: %[[INT2:.*]] = torch.constant.int 2 // CHECK: %[[INTM1:.*]] = torch.constant.int -1 // CHECK: %[[INT3:.*]] = torch.constant.int 3 // CHECK: %[[NEG_STEP:.*]] = torch.aten.__range_length %[[INT1]], %[[INT3]], %[[INTM1]] : !torch.int, !torch.int, !torch.int -> !torch.int // CHECK: return %[[INT2]], %[[INT2]], %[[INT1]], %[[NEG_STEP]] : !torch.int, !torch.int, !torch.int, !torch.int -func @torch.aten.__range_length$fold() -> (!torch.int, !torch.int, !torch.int, !torch.int) { +func.func @torch.aten.__range_length$fold() -> (!torch.int, !torch.int, !torch.int, !torch.int) { %int3 = torch.constant.int 3 %int4 = torch.constant.int 4 %int2 = torch.constant.int 2 @@ -21,95 +21,95 @@ func @torch.aten.__range_length$fold() -> (!torch.int, !torch.int, !torch.int, ! return %0, %1, %2, %3 : !torch.int, !torch.int, !torch.int, !torch.int } -// CHECK-LABEL: func @torch.aten.__is__ +// CHECK-LABEL: func.func @torch.aten.__is__ // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.__is__(%arg0: !torch.list, %arg1: !torch.none) -> !torch.bool { +func.func @torch.aten.__is__(%arg0: !torch.list, %arg1: !torch.none) -> !torch.bool { %0 = torch.aten.__is__ %arg0, %arg1 : !torch.list, !torch.none -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.__is__$derefine_is_none +// CHECK-LABEL: func.func @torch.aten.__is__$derefine_is_none // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.__is__$derefine_is_none(%arg0: !torch.list, %arg1: !torch.none) -> !torch.bool { +func.func @torch.aten.__is__$derefine_is_none(%arg0: !torch.list, %arg1: !torch.none) -> !torch.bool { %0 = torch.derefine %arg0 : !torch.list to !torch.optional> %1 = torch.aten.__is__ %0, %arg1 : !torch.optional>, !torch.none -> !torch.bool return %1 : !torch.bool } -// CHECK-LABEL: func @torch.aten.__is__$none_is_none +// CHECK-LABEL: func.func @torch.aten.__is__$none_is_none // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.__is__$none_is_none(%arg0: !torch.none, %arg1: !torch.none) -> !torch.bool { +func.func @torch.aten.__is__$none_is_none(%arg0: !torch.none, %arg1: !torch.none) -> !torch.bool { %0 = torch.aten.__is__ %arg0, %arg1 : !torch.none, !torch.none -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.__is__$is_none$derefine( +// CHECK-LABEL: func.func @torch.aten.__is__$is_none$derefine( // CHECK-SAME: %{{.*}}: !torch.vtensor) -> !torch.bool { // CHECK: %[[RESULT:.*]] = torch.constant.bool false // CHECK: return %[[RESULT]] : !torch.bool -func @torch.aten.__is__$is_none$derefine(%arg0: !torch.vtensor) -> !torch.bool { +func.func @torch.aten.__is__$is_none$derefine(%arg0: !torch.vtensor) -> !torch.bool { %none = 
torch.constant.none %0 = torch.derefine %arg0 : !torch.vtensor to !torch.optional %1 = torch.aten.__is__ %0, %none : !torch.optional, !torch.none -> !torch.bool return %1 : !torch.bool } -// CHECK-LABEL: func @torch.aten.__isnot__ +// CHECK-LABEL: func.func @torch.aten.__isnot__ // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.__isnot__(%arg0: !torch.list, %arg1: !torch.none) -> !torch.bool { +func.func @torch.aten.__isnot__(%arg0: !torch.list, %arg1: !torch.none) -> !torch.bool { %0 = torch.aten.__isnot__ %arg0, %arg1 : !torch.list, !torch.none -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.__isnot__$none_isnot_none +// CHECK-LABEL: func.func @torch.aten.__isnot__$none_isnot_none // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.__isnot__$none_isnot_none(%arg0: !torch.none, %arg1: !torch.none) -> !torch.bool { +func.func @torch.aten.__isnot__$none_isnot_none(%arg0: !torch.none, %arg1: !torch.none) -> !torch.bool { %0 = torch.aten.__isnot__ %arg0, %arg1 : !torch.none, !torch.none -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ne.bool() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ne.bool() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.ne.bool() -> !torch.bool { +func.func @torch.aten.ne.bool() -> !torch.bool { %a = torch.constant.bool true %b = torch.constant.bool false %0 = torch.aten.ne.bool %a, %b: !torch.bool, !torch.bool -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ne.bool$same_operand( +// CHECK-LABEL: func.func @torch.aten.ne.bool$same_operand( // CHECK-SAME: %[[ARG0:.*]]: !torch.bool) -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.ne.bool$same_operand(%arg0: !torch.bool) -> !torch.bool { +func.func @torch.aten.ne.bool$same_operand(%arg0: !torch.bool) -> !torch.bool { %0 = torch.aten.ne.bool %arg0, %arg0: !torch.bool, !torch.bool -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ne.bool$different_operand( +// CHECK-LABEL: func.func @torch.aten.ne.bool$different_operand( // CHECK-SAME: %[[ARG0:.*]]: !torch.bool) -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[RET:.*]] = torch.aten.ne.bool %[[ARG0]], %[[FALSE]] : !torch.bool, !torch.bool -> !torch.bool // CHECK: return %[[RET]] : !torch.bool -func @torch.aten.ne.bool$different_operand(%a: !torch.bool) -> !torch.bool { +func.func @torch.aten.ne.bool$different_operand(%a: !torch.bool) -> !torch.bool { %b = torch.constant.bool false %0 = torch.aten.ne.bool %a, %b: !torch.bool, !torch.bool -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.size$canonicalize_to_list( +// CHECK-LABEL: func.func @torch.aten.size$canonicalize_to_list( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3],f32>) -> !torch.list { // CHECK: %[[C2:.*]] = torch.constant.int 2 // CHECK: %[[C3:.*]] = torch.constant.int 3 // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct %[[C2]], %[[C3]] : (!torch.int, !torch.int) -> !torch.list // CHECK: return %[[LIST]] : !torch.list -func @torch.aten.size$canonicalize_to_list(%arg0: !torch.vtensor<[2,3],f32>) -> !torch.list { +func.func @torch.aten.size$canonicalize_to_list(%arg0: !torch.vtensor<[2,3],f32>) -> !torch.list { %0 = torch.aten.size %arg0 : !torch.vtensor<[2,3],f32> -> 
!torch.list return %0 : !torch.list } @@ -117,77 +117,77 @@ func @torch.aten.size$canonicalize_to_list(%arg0: !torch.vtensor<[2,3],f32>) -> // One size unknown, so cannot canonicalize. // TODO: For unknown sizes, insert the equivalent of a "dim" op. // Then this will only require static rank. -// CHECK-LABEL: func @torch.aten.size$unknown_size( +// CHECK-LABEL: func.func @torch.aten.size$unknown_size( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,3],f32>) -> !torch.list { // CHECK: %[[SIZE:.*]] = torch.aten.size %[[ARG]] : !torch.vtensor<[?,3],f32> -> !torch.list -func @torch.aten.size$unknown_size(%arg0: !torch.vtensor<[?,3],f32>) -> !torch.list { +func.func @torch.aten.size$unknown_size(%arg0: !torch.vtensor<[?,3],f32>) -> !torch.list { %0 = torch.aten.size %arg0 : !torch.vtensor<[?,3],f32> -> !torch.list return %0 : !torch.list } -// CHECK-LABEL: func @torch.aten.ne.int$same_operand( +// CHECK-LABEL: func.func @torch.aten.ne.int$same_operand( // CHECK-SAME: %{{.*}}: !torch.int) -> !torch.bool { // CHECK-NEXT: %[[FALSE:.*]] = torch.constant.bool false // CHECK-NEXT: return %[[FALSE]] : !torch.bool -func @torch.aten.ne.int$same_operand(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.ne.int$same_operand(%arg0: !torch.int) -> !torch.bool { %0 = torch.aten.ne.int %arg0, %arg0 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ne.int$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ne.int$same_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.ne.int$same_value() -> !torch.bool { +func.func @torch.aten.ne.int$same_value() -> !torch.bool { %int4 = torch.constant.int 4 %int4_0 = torch.constant.int 4 %2 = torch.aten.ne.int %int4, %int4_0 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ne.int$different_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ne.int$different_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.ne.int$different_value() -> !torch.bool { +func.func @torch.aten.ne.int$different_value() -> !torch.bool { %int4 = torch.constant.int 4 %int5 = torch.constant.int 5 %2 = torch.aten.ne.int %int4, %int5 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int$different_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.eq.int$different_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.eq.int$different_value() -> !torch.bool { +func.func @torch.aten.eq.int$different_value() -> !torch.bool { %int4 = torch.constant.int 4 %int5 = torch.constant.int 5 %2 = torch.aten.eq.int %int4, %int5 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int$same_operand( +// CHECK-LABEL: func.func @torch.aten.eq.int$same_operand( // CHECK-SAME: %{{.*}}: !torch.int) -> !torch.bool { // CHECK-NEXT: %[[F:.*]] = torch.constant.bool true // CHECK-NEXT: return %[[F]] : !torch.bool -func @torch.aten.eq.int$same_operand(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.eq.int$same_operand(%arg0: !torch.int) -> !torch.bool { %0 = torch.aten.eq.int %arg0, %arg0 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int$same_value() -> !torch.bool { +// 
CHECK-LABEL: func.func @torch.aten.eq.int$same_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.eq.int$same_value() -> !torch.bool { +func.func @torch.aten.eq.int$same_value() -> !torch.bool { %int4 = torch.constant.int 4 %int4_0 = torch.constant.int 4 %2 = torch.aten.eq.int %int4, %int4_0 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int$of_size.int( +// CHECK-LABEL: func.func @torch.aten.eq.int$of_size.int( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.eq.int$of_size.int(%arg0: !torch.tensor) -> !torch.bool { +func.func @torch.aten.eq.int$of_size.int(%arg0: !torch.tensor) -> !torch.bool { %int-1 = torch.constant.int -1 %int0 = torch.constant.int 0 %0 = torch.aten.size.int %arg0, %int0 : !torch.tensor, !torch.int -> !torch.int @@ -195,11 +195,11 @@ func @torch.aten.eq.int$of_size.int(%arg0: !torch.tensor) -> !torch.bool { return %1 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int$of_size.int_lhs_constant( +// CHECK-LABEL: func.func @torch.aten.eq.int$of_size.int_lhs_constant( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.eq.int$of_size.int_lhs_constant(%arg0: !torch.tensor) -> !torch.bool { +func.func @torch.aten.eq.int$of_size.int_lhs_constant(%arg0: !torch.tensor) -> !torch.bool { %int-1 = torch.constant.int -1 %int0 = torch.constant.int 0 %0 = torch.aten.size.int %arg0, %int0 : !torch.tensor, !torch.int -> !torch.int @@ -207,167 +207,167 @@ func @torch.aten.eq.int$of_size.int_lhs_constant(%arg0: !torch.tensor) -> !torch return %1 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int$no_change_minus1( +// CHECK-LABEL: func.func @torch.aten.eq.int$no_change_minus1( // CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.bool { // CHECK: %[[CM1:.*]] = torch.constant.int -1 // CHECK: %[[RESULT:.*]] = torch.aten.eq.int %[[CM1]], %[[ARG]] : !torch.int, !torch.int -> !torch.bool // CHECK: return %[[RESULT]] : !torch.bool -func @torch.aten.eq.int$no_change_minus1(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.eq.int$no_change_minus1(%arg0: !torch.int) -> !torch.bool { %int-1 = torch.constant.int -1 %1 = torch.aten.eq.int %int-1, %arg0 : !torch.int, !torch.int -> !torch.bool return %1 : !torch.bool } -// CHECK-LABEL: func @torch.aten.lt.int$evaluate_to_true() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.lt.int$evaluate_to_true() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.lt.int$evaluate_to_true() -> !torch.bool { +func.func @torch.aten.lt.int$evaluate_to_true() -> !torch.bool { %int4 = torch.constant.int 4 %int5 = torch.constant.int 5 %2 = torch.aten.lt.int %int4, %int5 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.lt.int$same_operand( +// CHECK-LABEL: func.func @torch.aten.lt.int$same_operand( // CHECK-SAME: %{{.*}}: !torch.int) -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.lt.int$same_operand(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.lt.int$same_operand(%arg0: !torch.int) -> !torch.bool { %2 = torch.aten.lt.int %arg0, %arg0: !torch.int, 
!torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.lt.int$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.lt.int$same_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.lt.int$same_value() -> !torch.bool { +func.func @torch.aten.lt.int$same_value() -> !torch.bool { %int4 = torch.constant.int 4 %int4_0 = torch.constant.int 4 %2 = torch.aten.lt.int %int4, %int4_0 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.le.int$evaluate_to_true() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.le.int$evaluate_to_true() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.le.int$evaluate_to_true() -> !torch.bool { +func.func @torch.aten.le.int$evaluate_to_true() -> !torch.bool { %int4 = torch.constant.int 4 %int5 = torch.constant.int 5 %2 = torch.aten.le.int %int4, %int5 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.le.int$same_operand( +// CHECK-LABEL: func.func @torch.aten.le.int$same_operand( // CHECK-SAME: %{{.*}}: !torch.int) -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.le.int$same_operand(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.le.int$same_operand(%arg0: !torch.int) -> !torch.bool { %2 = torch.aten.le.int %arg0, %arg0: !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.le.int$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.le.int$same_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.le.int$same_value() -> !torch.bool { +func.func @torch.aten.le.int$same_value() -> !torch.bool { %int4 = torch.constant.int 4 %int4_0 = torch.constant.int 4 %2 = torch.aten.le.int %int4, %int4_0 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.gt.int$evaluate_to_true() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.gt.int$evaluate_to_true() -> !torch.bool { // CHECK-NEXT: %[[T:.*]] = torch.constant.bool true // CHECK-NEXT: return %[[T]] : !torch.bool -func @torch.aten.gt.int$evaluate_to_true() -> !torch.bool { +func.func @torch.aten.gt.int$evaluate_to_true() -> !torch.bool { %int2 = torch.constant.int 2 %int4 = torch.constant.int 4 %0 = torch.aten.gt.int %int4, %int2 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.gt.int$evaluate_to_false() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.gt.int$evaluate_to_false() -> !torch.bool { // CHECK-NEXT: %[[T:.*]] = torch.constant.bool false // CHECK-NEXT: return %[[T]] : !torch.bool -func @torch.aten.gt.int$evaluate_to_false() -> !torch.bool { +func.func @torch.aten.gt.int$evaluate_to_false() -> !torch.bool { %int2 = torch.constant.int 2 %int4 = torch.constant.int 4 %0 = torch.aten.gt.int %int2, %int4 : !torch.int, !torch.int -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ge.int$evaluate_to_false() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ge.int$evaluate_to_false() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.ge.int$evaluate_to_false() -> !torch.bool { +func.func 
@torch.aten.ge.int$evaluate_to_false() -> !torch.bool { %int4 = torch.constant.int 4 %int5 = torch.constant.int 5 %2 = torch.aten.ge.int %int4, %int5 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ge.int$same_operand( +// CHECK-LABEL: func.func @torch.aten.ge.int$same_operand( // CHECK-SAME: %{{.*}}: !torch.int) -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.ge.int$same_operand(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.ge.int$same_operand(%arg0: !torch.int) -> !torch.bool { %2 = torch.aten.ge.int %arg0, %arg0: !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ge.int$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ge.int$same_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.ge.int$same_value() -> !torch.bool { +func.func @torch.aten.ge.int$same_value() -> !torch.bool { %int4 = torch.constant.int 4 %int4_0 = torch.constant.int 4 %2 = torch.aten.ge.int %int4, %int4_0 : !torch.int, !torch.int -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.lt.float$evaluate_to_true() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.lt.float$evaluate_to_true() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.lt.float$evaluate_to_true() -> !torch.bool { +func.func @torch.aten.lt.float$evaluate_to_true() -> !torch.bool { %float4 = torch.constant.float 4.0 %float5 = torch.constant.float 5.0 %2 = torch.aten.lt.float %float4, %float5 : !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.lt.float$same_operand( +// CHECK-LABEL: func.func @torch.aten.lt.float$same_operand( // CHECK-SAME: %{{.*}}: !torch.float) -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.lt.float$same_operand(%arg0: !torch.float) -> !torch.bool { +func.func @torch.aten.lt.float$same_operand(%arg0: !torch.float) -> !torch.bool { %2 = torch.aten.lt.float %arg0, %arg0: !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.lt.float$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.lt.float$same_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.lt.float$same_value() -> !torch.bool { +func.func @torch.aten.lt.float$same_value() -> !torch.bool { %float4 = torch.constant.float 4.0 %float4_0 = torch.constant.float 4.0 %2 = torch.aten.lt.float %float4, %float4_0 : !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.gt.float$evaluate_to_true() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.gt.float$evaluate_to_true() -> !torch.bool { // CHECK-NEXT: %[[T:.*]] = torch.constant.bool true // CHECK-NEXT: return %[[T]] : !torch.bool -func @torch.aten.gt.float$evaluate_to_true() -> !torch.bool { +func.func @torch.aten.gt.float$evaluate_to_true() -> !torch.bool { %float2 = torch.constant.float 2.0 %float4 = torch.constant.float 4.0 %0 = torch.aten.gt.float %float4, %float2 : !torch.float, !torch.float -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.gt.float$evaluate_to_false() -> !torch.bool { +// 
CHECK-LABEL: func.func @torch.aten.gt.float$evaluate_to_false() -> !torch.bool { // CHECK-NEXT: %[[T:.*]] = torch.constant.bool false // CHECK-NEXT: return %[[T]] : !torch.bool -func @torch.aten.gt.float$evaluate_to_false() -> !torch.bool { +func.func @torch.aten.gt.float$evaluate_to_false() -> !torch.bool { %float2 = torch.constant.float 2.0 %float4 = torch.constant.float 4.0 %0 = torch.aten.gt.float %float2, %float4 : !torch.float, !torch.float -> !torch.bool @@ -375,7 +375,7 @@ func @torch.aten.gt.float$evaluate_to_false() -> !torch.bool { } -// CHECK-LABEL: func @comparison_with_torch.aten.size.int( +// CHECK-LABEL: func.func @comparison_with_torch.aten.size.int( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,2],unk>) -> (!torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool) { // CHECK: %[[SIZE:.*]] = torch.aten.size.int %[[ARG0]], %int0 : !torch.vtensor<[?,2],unk>, !torch.int -> !torch.int // CHECK: %[[GE_0_LHS:.*]] = torch.aten.ge.int %int0, %[[SIZE]] : !torch.int, !torch.int -> !torch.bool @@ -387,7 +387,7 @@ func @torch.aten.gt.float$evaluate_to_false() -> !torch.bool { // CHECK: %[[EQ_0_RHS:.*]] = torch.aten.eq.int %[[SIZE]], %int0 : !torch.int, !torch.int -> !torch.bool // CHECK: %[[NE_0_RHS:.*]] = torch.aten.ne.int %[[SIZE]], %int0 : !torch.int, !torch.int -> !torch.bool // CHECK: return %true, %true, %false, %false, %[[GE_0_LHS]], %[[LT_0_LHS]], %[[EQ_0_LHS]], %[[NE_0_LHS]], %[[GT_0_RHS]], %[[LE_0_RHS]], %[[EQ_0_RHS]], %[[NE_0_RHS]] : !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool -func @comparison_with_torch.aten.size.int(%arg0: !torch.vtensor<[?,2],unk>) -> (!torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool) { +func.func @comparison_with_torch.aten.size.int(%arg0: !torch.vtensor<[?,2],unk>) -> (!torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool, !torch.bool) { %int0 = torch.constant.int 0 %0 = torch.aten.size.int %arg0, %int0 : !torch.vtensor<[?,2],unk>, !torch.int -> !torch.int // Cases we can fold. 
@@ -408,86 +408,86 @@ func @comparison_with_torch.aten.size.int(%arg0: !torch.vtensor<[?,2],unk>) -> ( } -// CHECK-LABEL: func @torch.aten.eq.float$different_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.eq.float$different_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.eq.float$different_value() -> !torch.bool { +func.func @torch.aten.eq.float$different_value() -> !torch.bool { %float4 = torch.constant.float 4.0 %float5 = torch.constant.float 5.0 %2 = torch.aten.eq.float %float4, %float5 : !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.float$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.eq.float$same_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.eq.float$same_value() -> !torch.bool { +func.func @torch.aten.eq.float$same_value() -> !torch.bool { %float4 = torch.constant.float 4.0 %float4_0 = torch.constant.float 4.0 %2 = torch.aten.eq.float %float4, %float4_0 : !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.str$different_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.eq.str$different_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.eq.str$different_value() -> !torch.bool { +func.func @torch.aten.eq.str$different_value() -> !torch.bool { %str4 = torch.constant.str "4" %str5 = torch.constant.str "5" %2 = torch.aten.eq.str %str4, %str5 : !torch.str, !torch.str -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.str$same_operand( +// CHECK-LABEL: func.func @torch.aten.eq.str$same_operand( // CHECK-SAME: %{{.*}}: !torch.str) -> !torch.bool { // CHECK-NEXT: %[[F:.*]] = torch.constant.bool true // CHECK-NEXT: return %[[F]] : !torch.bool -func @torch.aten.eq.str$same_operand(%arg0: !torch.str) -> !torch.bool { +func.func @torch.aten.eq.str$same_operand(%arg0: !torch.str) -> !torch.bool { %0 = torch.aten.eq.str %arg0, %arg0 : !torch.str, !torch.str -> !torch.bool return %0 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.str$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.eq.str$same_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.eq.str$same_value() -> !torch.bool { +func.func @torch.aten.eq.str$same_value() -> !torch.bool { %str4 = torch.constant.str "4" %str4_0 = torch.constant.str "4" %2 = torch.aten.eq.str %str4, %str4_0 : !torch.str, !torch.str -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.__not__ +// CHECK-LABEL: func.func @torch.aten.__not__ // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.__not__() -> !torch.bool { +func.func @torch.aten.__not__() -> !torch.bool { %false = torch.constant.bool false %ret = torch.aten.__not__ %false : !torch.bool -> !torch.bool return %ret: !torch.bool } -// CHECK-LABEL: func @torch.prim.max.int$identity( +// CHECK-LABEL: func.func @torch.prim.max.int$identity( // CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.int { // CHECK: return %[[ARG]] : !torch.int -func @torch.prim.max.int$identity(%arg0: !torch.int) -> !torch.int { +func.func @torch.prim.max.int$identity(%arg0: !torch.int) -> !torch.int { %0 = 
torch.prim.max.int %arg0, %arg0 : !torch.int, !torch.int -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.prim.max.int$constant() -> !torch.int { +// CHECK-LABEL: func.func @torch.prim.max.int$constant() -> !torch.int { // CHECK: %[[INT3:.*]] = torch.constant.int 3 // CHECK: return %[[INT3]] : !torch.int -func @torch.prim.max.int$constant() -> !torch.int { +func.func @torch.prim.max.int$constant() -> !torch.int { %int-1 = torch.constant.int -1 %int3 = torch.constant.int 3 %0 = torch.prim.max.int %int-1, %int3 : !torch.int, !torch.int -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.prim.min.self_int$basic() -> !torch.int { +// CHECK-LABEL: func.func @torch.prim.min.self_int$basic() -> !torch.int { // CHECK: %[[M1:.*]] = torch.constant.int -1 // CHECK: return %[[M1]] : !torch.int -func @torch.prim.min.self_int$basic() -> !torch.int { +func.func @torch.prim.min.self_int$basic() -> !torch.int { %int-1 = torch.constant.int -1 %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 @@ -496,9 +496,9 @@ func @torch.prim.min.self_int$basic() -> !torch.int { return %1 : !torch.int } -// CHECK-LABEL: func @torch.prim.min.self_int$nofold$dynamic( +// CHECK-LABEL: func.func @torch.prim.min.self_int$nofold$dynamic( // CHECK: torch.prim.min.self_int -func @torch.prim.min.self_int$nofold$dynamic(%arg0: !torch.int) -> !torch.int { +func.func @torch.prim.min.self_int$nofold$dynamic(%arg0: !torch.int) -> !torch.int { %int-1 = torch.constant.int -1 %int0 = torch.constant.int 0 %0 = torch.prim.ListConstruct %int-1, %int0, %arg0: (!torch.int, !torch.int, !torch.int) -> !torch.list @@ -506,37 +506,37 @@ func @torch.prim.min.self_int$nofold$dynamic(%arg0: !torch.int) -> !torch.int { return %1 : !torch.int } -// CHECK-LABEL: func @torch.aten.len.t$of_size( +// CHECK-LABEL: func.func @torch.aten.len.t$of_size( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<*,f32>) -> !torch.int { // CHECK: %[[DIM:.*]] = torch.aten.dim %[[ARG]] : !torch.vtensor<*,f32> -> !torch.int // CHECK: return %[[DIM]] : !torch.int -func @torch.aten.len.t$of_size(%arg0: !torch.vtensor<*,f32>) -> !torch.int { +func.func @torch.aten.len.t$of_size(%arg0: !torch.vtensor<*,f32>) -> !torch.int { %0 = torch.aten.size %arg0 : !torch.vtensor<*,f32> -> !torch.list %1 = torch.aten.len.t %0 : !torch.list -> !torch.int return %1 : !torch.int } -// CHECK-LABEL: func @torch.aten.dim$with_shape( +// CHECK-LABEL: func.func @torch.aten.dim$with_shape( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.int { // CHECK: %[[DIM:.*]] = torch.constant.int 3 // CHECK: return %[[DIM]] : !torch.int -func @torch.aten.dim$with_shape(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.int { +func.func @torch.aten.dim$with_shape(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.int { %0 = torch.aten.dim %arg0 : !torch.vtensor<[?,?,?],f32> -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.aten.len.t$of_build_list( +// CHECK-LABEL: func.func @torch.aten.len.t$of_build_list( // CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.int { // CHECK: %[[LEN:.*]] = torch.constant.int 4 // CHECK: return %[[LEN]] : !torch.int -func @torch.aten.len.t$of_build_list(%arg0: !torch.int) -> !torch.int { +func.func @torch.aten.len.t$of_build_list(%arg0: !torch.int) -> !torch.int { %0 = torch.prim.ListConstruct %arg0, %arg0, %arg0, %arg0 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list %1 = torch.aten.len.t %0 : !torch.list -> !torch.int return %1 : !torch.int } -// CHECK-LABEL: func 
@torch.aten.len.t$no_fold_list_mutated() -func @torch.aten.len.t$no_fold_list_mutated() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.len.t$no_fold_list_mutated() +func.func @torch.aten.len.t$no_fold_list_mutated() -> !torch.int { %int4 = torch.constant.int 4 %0 = torch.prim.ListConstruct : () -> !torch.list %1 = torch.aten.append.t %0, %int4 : !torch.list, !torch.int -> !torch.list @@ -545,10 +545,10 @@ func @torch.aten.len.t$no_fold_list_mutated() -> !torch.int { return %2 : !torch.int } -// CHECK-LABEL: func @torch.aten.__getitem__.t( +// CHECK-LABEL: func.func @torch.aten.__getitem__.t( // CHECK: %[[C5:.*]] = torch.constant.int 5 // CHECK: return %[[C5]] : !torch.int -func @torch.aten.__getitem__.t() -> !torch.int { +func.func @torch.aten.__getitem__.t() -> !torch.int { %int4 = torch.constant.int 4 %int5 = torch.constant.int 5 %int1 = torch.constant.int 1 @@ -558,13 +558,13 @@ func @torch.aten.__getitem__.t() -> !torch.int { } // Not canonicalized because of passed in index -// CHECK-LABEL: func @torch.aten.__getitem__.t$no_change_test0( +// CHECK-LABEL: func.func @torch.aten.__getitem__.t$no_change_test0( // CHECK: %[[C4:.*]] = torch.constant.int 4 // CHECK: %[[C5:.*]] = torch.constant.int 5 // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct %[[C4]], %[[C5]] : (!torch.int, !torch.int) -> !torch.list // CHECK: %[[ITEM:.*]] = torch.aten.__getitem__.t %[[LIST]], %arg0 : !torch.list, !torch.int -> !torch.int // CHECK: return %[[ITEM]] : !torch.int -func @torch.aten.__getitem__.t$no_change_test0(%arg0: !torch.int) -> !torch.int { +func.func @torch.aten.__getitem__.t$no_change_test0(%arg0: !torch.int) -> !torch.int { %int5 = torch.constant.int 5 %int4 = torch.constant.int 4 %0 = torch.prim.ListConstruct %int4, %int5 : (!torch.int, !torch.int) -> !torch.list @@ -573,31 +573,31 @@ func @torch.aten.__getitem__.t$no_change_test0(%arg0: !torch.int) -> !torch.int } // Not canonicalized because of passed in list -// CHECK-LABEL: func @torch.aten.__getitem__.t$no_change_test1( +// CHECK-LABEL: func.func @torch.aten.__getitem__.t$no_change_test1( // CHECK: %[[C5:.*]] = torch.constant.int 5 // CHECK: %[[ITEM:.*]] = torch.aten.__getitem__.t %arg0, %[[C5]] : !torch.list, !torch.int -> !torch.int // CHECK: return %[[ITEM]] : !torch.int -func @torch.aten.__getitem__.t$no_change_test1(%arg0: !torch.list) -> !torch.int { +func.func @torch.aten.__getitem__.t$no_change_test1(%arg0: !torch.list) -> !torch.int { %int5 = torch.constant.int 5 %0 = torch.aten.__getitem__.t %arg0, %int5 : !torch.list, !torch.int -> !torch.int return %0 : !torch.int } -// CHECK-LABEL: func @torch.aten.__getitem__.t$getitem_of_size( +// CHECK-LABEL: func.func @torch.aten.__getitem__.t$getitem_of_size( // CHECK-SAME: %[[TENSOR:.*]]: !torch.tensor, // CHECK-SAME: %[[INDEX:.*]]: !torch.int) -> !torch.int { // CHECK: %[[RESULT:.*]] = torch.aten.size.int %[[TENSOR]], %[[INDEX]] : !torch.tensor, !torch.int -> !torch.int // CHECK: return %[[RESULT]] : !torch.int -func @torch.aten.__getitem__.t$getitem_of_size(%arg0: !torch.tensor, %arg1: !torch.int) -> !torch.int { +func.func @torch.aten.__getitem__.t$getitem_of_size(%arg0: !torch.tensor, %arg1: !torch.int) -> !torch.int { %0 = torch.aten.size %arg0 : !torch.tensor -> !torch.list %1 = torch.aten.__getitem__.t %0, %arg1 : !torch.list, !torch.int -> !torch.int return %1 : !torch.int } -// CHECK-LABEL: func @torch.aten.__getitem__.t$negative_index() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.__getitem__.t$negative_index() -> !torch.int { // CHECK: %[[INT8:.*]] = 
torch.constant.int 8 // CHECK: return %[[INT8]] : !torch.int -func @torch.aten.__getitem__.t$negative_index() -> !torch.int { +func.func @torch.aten.__getitem__.t$negative_index() -> !torch.int { %int7 = torch.constant.int 7 %int8 = torch.constant.int 8 %int-1 = torch.constant.int -1 @@ -606,8 +606,8 @@ func @torch.aten.__getitem__.t$negative_index() -> !torch.int { return %1 : !torch.int } -// CHECK-LABEL: func @torch.aten.__getitem__.t$invalid_index() -> !torch.int { -func @torch.aten.__getitem__.t$invalid_index() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.__getitem__.t$invalid_index() -> !torch.int { +func.func @torch.aten.__getitem__.t$invalid_index() -> !torch.int { %int7 = torch.constant.int 7 %int8 = torch.constant.int 8 %int-1 = torch.constant.int -100 @@ -617,28 +617,28 @@ func @torch.aten.__getitem__.t$invalid_index() -> !torch.int { return %1 : !torch.int } -// CHECK-LABEL: func @torch.aten.eq.int_list$fold$literals_of_different_sizes +// CHECK-LABEL: func.func @torch.aten.eq.int_list$fold$literals_of_different_sizes // CHECK: %[[RET:.*]] = torch.constant.bool false // CHECK: return %[[RET]] : !torch.bool -func @torch.aten.eq.int_list$fold$literals_of_different_sizes(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.eq.int_list$fold$literals_of_different_sizes(%arg0: !torch.int) -> !torch.bool { %0 = torch.prim.ListConstruct : () -> !torch.list %1 = torch.prim.ListConstruct %arg0 : (!torch.int) -> !torch.list %2 = torch.aten.eq.int_list %0, %1 : !torch.list, !torch.list -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int_list$fold$same_literal +// CHECK-LABEL: func.func @torch.aten.eq.int_list$fold$same_literal // CHECK: %[[RET:.*]] = torch.constant.bool true // CHECK: return %[[RET]] : !torch.bool -func @torch.aten.eq.int_list$fold$same_literal(%arg0: !torch.int) -> !torch.bool { +func.func @torch.aten.eq.int_list$fold$same_literal(%arg0: !torch.int) -> !torch.bool { %0 = torch.prim.ListConstruct %arg0 : (!torch.int) -> !torch.list %1 = torch.prim.ListConstruct %arg0 : (!torch.int) -> !torch.list %2 = torch.aten.eq.int_list %0, %1 : !torch.list, !torch.list -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.eq.int_list$no_fold$different_literals( -func @torch.aten.eq.int_list$no_fold$different_literals(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.eq.int_list$no_fold$different_literals( +func.func @torch.aten.eq.int_list$no_fold$different_literals(%arg0: !torch.int, %arg1: !torch.int) -> !torch.bool { %0 = torch.prim.ListConstruct %arg0 : (!torch.int) -> !torch.list %1 = torch.prim.ListConstruct %arg1 : (!torch.int) -> !torch.list // CHECK: torch.aten.eq.int_list @@ -646,59 +646,59 @@ func @torch.aten.eq.int_list$no_fold$different_literals(%arg0: !torch.int, %arg1 return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.Float.Scalar$constant_fold_int_to_float() -> !torch.float { +// CHECK-LABEL: func.func @torch.aten.Float.Scalar$constant_fold_int_to_float() -> !torch.float { // CHECK: %[[VAL_0:.*]] = torch.constant.float 3.000000e+00 // CHECK: return %[[VAL_0]] : !torch.float -func @torch.aten.Float.Scalar$constant_fold_int_to_float() -> !torch.float { +func.func @torch.aten.Float.Scalar$constant_fold_int_to_float() -> !torch.float { %0 = torch.constant.int 3 %1 = torch.aten.Float.Scalar %0 : !torch.int -> !torch.float return %1 : !torch.float } -// CHECK-LABEL: func @torch.aten.Float.Scalar$identity( +// CHECK-LABEL: func.func 
@torch.aten.Float.Scalar$identity( // CHECK-SAME: %[[VAL_0:.*]]: !torch.float) -> !torch.float { // CHECK: return %[[VAL_0]] : !torch.float -func @torch.aten.Float.Scalar$identity(%arg0: !torch.float) -> !torch.float { +func.func @torch.aten.Float.Scalar$identity(%arg0: !torch.float) -> !torch.float { %0 = torch.aten.Float.Scalar %arg0 : !torch.float -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @torch.constant.none$constantlike() -> (!torch.none, !torch.none) { +// CHECK-LABEL: func.func @torch.constant.none$constantlike() -> (!torch.none, !torch.none) { // CHECK: %[[C:.*]] = torch.constant.none // CHECK: return %[[C]], %[[C]] : !torch.none, !torch.none -func @torch.constant.none$constantlike() -> (!torch.none, !torch.none) { +func.func @torch.constant.none$constantlike() -> (!torch.none, !torch.none) { %0 = torch.constant.none %1 = torch.constant.none return %0, %1 : !torch.none, !torch.none } -// CHECK-LABEL: func @torch.constant.str$constantlike() -> (!torch.str, !torch.str, !torch.str) { +// CHECK-LABEL: func.func @torch.constant.str$constantlike() -> (!torch.str, !torch.str, !torch.str) { // CHECK: %[[T:.*]] = torch.constant.str "t" // CHECK: %[[S:.*]] = torch.constant.str "s" // CHECK: return %[[S]], %[[S]], %[[T]] : !torch.str, !torch.str, !torch.str -func @torch.constant.str$constantlike() -> (!torch.str, !torch.str, !torch.str) { +func.func @torch.constant.str$constantlike() -> (!torch.str, !torch.str, !torch.str) { %0 = torch.constant.str "s" %1 = torch.constant.str "s" %2 = torch.constant.str "t" return %0, %1, %2 : !torch.str, !torch.str, !torch.str } -// CHECK-LABEL: func @torch.constant.bool$constantlike() -> (!torch.bool, !torch.bool, !torch.bool) { +// CHECK-LABEL: func.func @torch.constant.bool$constantlike() -> (!torch.bool, !torch.bool, !torch.bool) { // CHECK: %[[F:.*]] = torch.constant.bool false // CHECK: %[[T:.*]] = torch.constant.bool true // CHECK: return %[[T]], %[[T]], %[[F]] : !torch.bool, !torch.bool, !torch.bool -func @torch.constant.bool$constantlike() -> (!torch.bool, !torch.bool, !torch.bool) { +func.func @torch.constant.bool$constantlike() -> (!torch.bool, !torch.bool, !torch.bool) { %0 = torch.constant.bool true %1 = torch.constant.bool true %2 = torch.constant.bool false return %0, %1, %2 : !torch.bool, !torch.bool, !torch.bool } -// CHECK-LABEL: func @torch.prim.If$erase_dead_branch( +// CHECK-LABEL: func.func @torch.prim.If$erase_dead_branch( // CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.int { // CHECK-NEXT: %[[RET:.*]] = torch.aten.add.int %[[ARG]], %[[ARG]] : !torch.int, !torch.int -> !torch.int // CHECK-NEXT: return %[[RET]] : !torch.int -func @torch.prim.If$erase_dead_branch(%arg0: !torch.int) -> !torch.int { +func.func @torch.prim.If$erase_dead_branch(%arg0: !torch.int) -> !torch.int { %true = torch.constant.bool true %0 = torch.prim.If %true -> (!torch.int) { %1 = torch.aten.add.int %arg0, %arg0 : !torch.int, !torch.int -> !torch.int @@ -710,7 +710,7 @@ func @torch.prim.If$erase_dead_branch(%arg0: !torch.int) -> !torch.int { return %0 : !torch.int } -// CHECK-LABEL: func @torch.prim.If$no_fold$side_effect( +// CHECK-LABEL: func.func @torch.prim.If$no_fold$side_effect( // CHECK-SAME: %[[ARG0:.*]]: !torch.bool) { // CHECK: %[[STR:.*]] = torch.constant.str "str" // CHECK: torch.prim.If %[[ARG0]] -> () { @@ -720,7 +720,7 @@ func @torch.prim.If$erase_dead_branch(%arg0: !torch.int) -> !torch.int { // CHECK: torch.prim.If.yield // CHECK: } // CHECK: return -func @torch.prim.If$no_fold$side_effect(%arg0: !torch.bool) { +func.func 
@torch.prim.If$no_fold$side_effect(%arg0: !torch.bool) { %str = torch.constant.str "str" torch.prim.If %arg0 -> () { torch.prim.RaiseException %str, %str : !torch.str, !torch.str @@ -731,11 +731,11 @@ func @torch.prim.If$no_fold$side_effect(%arg0: !torch.bool) { return } -// CHECK-LABEL: func @torch.prim.If$fold_same_result( +// CHECK-LABEL: func.func @torch.prim.If$fold_same_result( // CHECK-SAME: %[[PRED:.*]]: !torch.bool, // CHECK-SAME: %[[ARG1:.*]]: !torch.int) -> (!torch.int, !torch.int) { // CHECK-NEXT: return %[[ARG1]], %[[ARG1]] : !torch.int, !torch.int -func @torch.prim.If$fold_same_result(%arg0: !torch.bool, %arg1: !torch.int) -> (!torch.int, !torch.int) { +func.func @torch.prim.If$fold_same_result(%arg0: !torch.bool, %arg1: !torch.int) -> (!torch.int, !torch.int) { %0, %1 = torch.prim.If %arg0 -> (!torch.int, !torch.int) { torch.prim.If.yield %arg1, %arg1 : !torch.int, !torch.int } else { @@ -744,7 +744,7 @@ func @torch.prim.If$fold_same_result(%arg0: !torch.bool, %arg1: !torch.int) -> ( return %0, %1: !torch.int, !torch.int } -// CHECK-LABEL: func @torch.prim.If$fold_same_result$subset_of_results( +// CHECK-LABEL: func.func @torch.prim.If$fold_same_result$subset_of_results( // CHECK-SAME: %[[PRED:.*]]: !torch.bool, // CHECK-SAME: %[[ARG1:.*]]: !torch.int, // CHECK-SAME: %[[ARG2:.*]]: !torch.int) -> (!torch.int, !torch.int) { @@ -754,7 +754,7 @@ func @torch.prim.If$fold_same_result(%arg0: !torch.bool, %arg1: !torch.int) -> ( // CHECK: torch.prim.If.yield %[[ARG2]] : !torch.int // CHECK: } // CHECK: return %[[ARG1]], %[[IF_RESULT:.*]] : !torch.int, !torch.int -func @torch.prim.If$fold_same_result$subset_of_results(%arg0: !torch.bool, %arg1: !torch.int, %arg2: !torch.int) -> (!torch.int, !torch.int) { +func.func @torch.prim.If$fold_same_result$subset_of_results(%arg0: !torch.bool, %arg1: !torch.int, %arg2: !torch.int) -> (!torch.int, !torch.int) { %0, %1 = torch.prim.If %arg0 -> (!torch.int, !torch.int) { torch.prim.If.yield %arg1, %arg1: !torch.int, !torch.int } else { @@ -763,18 +763,18 @@ func @torch.prim.If$fold_same_result$subset_of_results(%arg0: !torch.bool, %arg1 return %0, %1: !torch.int, !torch.int } -// CHECK-LABEL: func @torch.prim.TupleUnpack( +// CHECK-LABEL: func.func @torch.prim.TupleUnpack( // CHECK-SAME: %[[ARG0:.*]]: !torch.tensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: return %[[ARG0]] : !torch.tensor -func @torch.prim.TupleUnpack(%arg0: !torch.tensor, %arg1: !torch.tensor) -> !torch.tensor{ +func.func @torch.prim.TupleUnpack(%arg0: !torch.tensor, %arg1: !torch.tensor) -> !torch.tensor{ %123 = torch.prim.TupleConstruct %arg0, %arg1: !torch.tensor, !torch.tensor -> !torch.tuple %124:2 = torch.prim.TupleUnpack %123 : !torch.tuple -> !torch.tensor, !torch.tensor return %124#0 : !torch.tensor } -// CHECK-LABEL: func @torch.aten.__contains__.str( +// CHECK-LABEL: func.func @torch.aten.__contains__.str( // CHECK-SAME: %[[K0:.*]]: !torch.str, %[[V0:.*]]: !torch.tensor, // CHECK-SAME: %[[K1:.*]]: !torch.str, // CHECK-SAME: %[[V1:.*]]: !torch.tensor) -> !torch.bool { @@ -784,13 +784,13 @@ func @torch.prim.TupleUnpack(%arg0: !torch.tensor, %arg1: !torch.tensor) -> !tor // CHECK-SAME: values(%[[V0]], %[[V1]] : !torch.tensor, !torch.tensor) // CHECK-SAME: -> !torch.dict // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.__contains__.str(%k0 : !torch.str, %v0: !torch.tensor, %k1: !torch.str, %v1: !torch.tensor) -> !torch.bool{ +func.func @torch.aten.__contains__.str(%k0 : !torch.str, %v0: !torch.tensor, %k1: !torch.str, %v1: 
!torch.tensor) -> !torch.bool{ %dict = torch.prim.DictConstruct keys(%k0, %k1: !torch.str, !torch.str) values(%v0, %v1: !torch.tensor, !torch.tensor) -> !torch.dict %pred = torch.aten.__contains__.str %dict, %k0 : !torch.dict, !torch.str -> !torch.bool return %pred : !torch.bool } -// CHECK-LABEL: func @torch.aten.__contains__.str$with_dict_modified( +// CHECK-LABEL: func.func @torch.aten.__contains__.str$with_dict_modified( // CHECK-SAME: %[[K0:.*]]: !torch.str, %[[V0:.*]]: !torch.tensor, // CHECK-SAME: %[[K1:.*]]: !torch.str, %[[V1:.*]]: !torch.tensor) -> !torch.bool { // CHECK: %[[DICT:.*]] = torch.prim.DictConstruct @@ -803,14 +803,14 @@ func @torch.aten.__contains__.str(%k0 : !torch.str, %v0: !torch.tensor, %k1: !to // CHECK-SAME: !torch.dict, !torch.str -> !torch.bool // CHECK: return %[[RET]] : !torch.bool -func @torch.aten.__contains__.str$with_dict_modified(%k0 : !torch.str, %v0: !torch.tensor, %k1: !torch.str, %v1: !torch.tensor) -> !torch.bool{ +func.func @torch.aten.__contains__.str$with_dict_modified(%k0 : !torch.str, %v0: !torch.tensor, %k1: !torch.str, %v1: !torch.tensor) -> !torch.bool{ %dict = torch.prim.DictConstruct keys(%k0, %k1: !torch.str, !torch.str) values(%v0, %v1: !torch.tensor, !torch.tensor) -> !torch.dict torch.aten._set_item.str %dict, %k0, %v1 : !torch.dict, !torch.str, !torch.tensor %pred = torch.aten.__contains__.str %dict, %k0 : !torch.dict, !torch.str -> !torch.bool return %pred : !torch.bool } -// CHECK-LABEL: func @torch.aten.__getitem__.Dict_str( +// CHECK-LABEL: func.func @torch.aten.__getitem__.Dict_str( // CHECK-SAME: %[[K0:.*]]: !torch.str, %[[V0:.*]]: !torch.tensor, // CHECK-SAME: %[[K1:.*]]: !torch.str, %[[V1:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[DICT:.*]] = torch.prim.DictConstruct @@ -818,149 +818,149 @@ func @torch.aten.__contains__.str$with_dict_modified(%k0 : !torch.str, %v0: !tor // CHECK-SAME: values(%[[V0]], %[[V1]] : !torch.tensor, !torch.tensor) // CHECK-SAME: -> !torch.dict // CHECK: return %[[V0]] : !torch.tensor -func @torch.aten.__getitem__.Dict_str(%k0 : !torch.str, %v0: !torch.tensor, %k1: !torch.str, %v1: !torch.tensor) -> !torch.tensor { +func.func @torch.aten.__getitem__.Dict_str(%k0 : !torch.str, %v0: !torch.tensor, %k1: !torch.str, %v1: !torch.tensor) -> !torch.tensor { %dict = torch.prim.DictConstruct keys(%k0, %k1: !torch.str, !torch.str) values(%v0, %v1: !torch.tensor, !torch.tensor) -> !torch.dict %v = torch.aten.__getitem__.Dict_str %dict, %k0 : !torch.dict, !torch.str -> !torch.tensor return %v : !torch.tensor } -// CHECK-LABEL: func @torch.aten.add.int() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.add.int() -> !torch.int { // CHECK: %[[CST9:.*]] = torch.constant.int 9 // CHECK: return %[[CST9]] : !torch.int -func @torch.aten.add.int() -> !torch.int { +func.func @torch.aten.add.int() -> !torch.int { %cst4 = torch.constant.int 4 %cst5 = torch.constant.int 5 %ret = torch.aten.add.int %cst4, %cst5: !torch.int, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.sub.int() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.sub.int() -> !torch.int { // CHECK: %[[CST1:.*]] = torch.constant.int 1 // CHECK: return %[[CST1]] : !torch.int -func @torch.aten.sub.int() -> !torch.int { +func.func @torch.aten.sub.int() -> !torch.int { %cst6 = torch.constant.int 6 %cst5 = torch.constant.int 5 %ret = torch.aten.sub.int %cst6, %cst5: !torch.int, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.mul.int() -> !torch.int { +// CHECK-LABEL: 
func.func @torch.aten.mul.int() -> !torch.int { // CHECK: %[[CST30:.*]] = torch.constant.int 30 // CHECK: return %[[CST30]] : !torch.int -func @torch.aten.mul.int() -> !torch.int { +func.func @torch.aten.mul.int() -> !torch.int { %cst6 = torch.constant.int 6 %cst5 = torch.constant.int 5 %ret = torch.aten.mul.int %cst6, %cst5: !torch.int, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.mul.int$with_zero() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.mul.int$with_zero() -> !torch.int { // CHECK: %[[CST0:.*]] = torch.constant.int 0 // CHECK: return %[[CST0]] : !torch.int -func @torch.aten.mul.int$with_zero() -> !torch.int { +func.func @torch.aten.mul.int$with_zero() -> !torch.int { %cst6 = torch.constant.int 6 %cst0 = torch.constant.int 0 %ret = torch.aten.mul.int %cst6, %cst0: !torch.int, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.floordiv.int() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.floordiv.int() -> !torch.int { // CHECK: %[[CST3:.*]] = torch.constant.int 3 // CHECK: return %[[CST3]] : !torch.int -func @torch.aten.floordiv.int() -> !torch.int { +func.func @torch.aten.floordiv.int() -> !torch.int { %cst18 = torch.constant.int 18 %cst5 = torch.constant.int 5 %ret = torch.aten.floordiv.int %cst18, %cst5: !torch.int, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.remainder.int() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.remainder.int() -> !torch.int { // CHECK: %[[CST3:.*]] = torch.constant.int 3 // CHECK: return %[[CST3]] : !torch.int -func @torch.aten.remainder.int() -> !torch.int { +func.func @torch.aten.remainder.int() -> !torch.int { %cst18 = torch.constant.int 18 %cst5 = torch.constant.int 5 %ret = torch.aten.remainder.int %cst18, %cst5: !torch.int, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.prim.dtype$float( +// CHECK-LABEL: func.func @torch.prim.dtype$float( // CHECK-SAME: %[[T:.*]]: !torch.tensor<*,f32>) -> !torch.int { // CHECK: %[[CST:.*]] = torch.constant.int 6 // CHECK: return %[[CST]] : !torch.int -func @torch.prim.dtype$float(%t : !torch.tensor<*,f32>) -> !torch.int { +func.func @torch.prim.dtype$float(%t : !torch.tensor<*,f32>) -> !torch.int { %ret = torch.prim.dtype %t: !torch.tensor<*,f32> -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.prim.dtype$bool( +// CHECK-LABEL: func.func @torch.prim.dtype$bool( // CHECK-SAME: %[[T:.*]]: !torch.tensor<*,i1>) -> !torch.int { // CHECK: %[[CST:.*]] = torch.constant.int 11 // CHECK: return %[[CST]] : !torch.int -func @torch.prim.dtype$bool(%t : !torch.tensor<*,i1>) -> !torch.int { +func.func @torch.prim.dtype$bool(%t : !torch.tensor<*,i1>) -> !torch.int { %ret = torch.prim.dtype %t: !torch.tensor<*,i1> -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.prim.dtype$int64( +// CHECK-LABEL: func.func @torch.prim.dtype$int64( // CHECK-SAME: %[[T:.*]]: !torch.tensor<*,si64>) -> !torch.int { // CHECK: %[[CST:.*]] = torch.constant.int 4 // CHECK: return %[[CST]] : !torch.int -func @torch.prim.dtype$int64(%t : !torch.tensor<*,si64>) -> !torch.int { +func.func @torch.prim.dtype$int64(%t : !torch.tensor<*,si64>) -> !torch.int { %ret = torch.prim.dtype %t: !torch.tensor<*,si64> -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.size.int$neg_dim( +// CHECK-LABEL: func.func @torch.aten.size.int$neg_dim( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>) -> !torch.int { // CHECK: %[[RET:.*]] = 
torch.constant.int 2 // CHECK: return %[[RET]] : !torch.int -func @torch.aten.size.int$neg_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.int { +func.func @torch.aten.size.int$neg_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.int { %int-2 = torch.constant.int -2 %ret = torch.aten.size.int %t, %int-2 : !torch.tensor<[2,3],f32>, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.size.int$pos_dim( +// CHECK-LABEL: func.func @torch.aten.size.int$pos_dim( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>) -> !torch.int { // CHECK: %[[RET:.*]] = torch.constant.int 3 // CHECK: return %[[RET]] : !torch.int -func @torch.aten.size.int$pos_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.int { +func.func @torch.aten.size.int$pos_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.int { %int1 = torch.constant.int 1 %ret = torch.aten.size.int %t, %int1 : !torch.tensor<[2,3],f32>, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.aten.size.int$invalid_dim( +// CHECK-LABEL: func.func @torch.aten.size.int$invalid_dim( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>) -> !torch.int { // CHECK: %[[CST3:.*]] = torch.constant.int 3 // CHECK: %[[RET:.*]] = torch.aten.size.int %[[T]], %[[CST3]] : !torch.tensor<[2,3],f32>, !torch.int -> !torch.int // CHECK: return %[[RET]] : !torch.int -func @torch.aten.size.int$invalid_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.int { +func.func @torch.aten.size.int$invalid_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.int { %int3 = torch.constant.int 3 %ret = torch.aten.size.int %t, %int3 : !torch.tensor<[2,3],f32>, !torch.int -> !torch.int return %ret : !torch.int } -// CHECK-LABEL: func @torch.prim.unchecked_cast$derefine_identity( +// CHECK-LABEL: func.func @torch.prim.unchecked_cast$derefine_identity( // CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.int { // CHECK: return %[[ARG]] : !torch.int -func @torch.prim.unchecked_cast$derefine_identity(%arg0: !torch.int) -> !torch.int { +func.func @torch.prim.unchecked_cast$derefine_identity(%arg0: !torch.int) -> !torch.int { %0 = torch.derefine %arg0 : !torch.int to !torch.optional %1 = torch.prim.unchecked_cast %0 : !torch.optional -> !torch.int return %1 : !torch.int } -// CHECK-LABEL: func @torch.derefine$of_unchecked_cast( +// CHECK-LABEL: func.func @torch.derefine$of_unchecked_cast( // CHECK-SAME: %[[ARG:.*]]: !torch.optional) -> !torch.optional { // CHECK: return %[[ARG]] : !torch.optional -func @torch.derefine$of_unchecked_cast(%arg0: !torch.optional) -> !torch.optional { +func.func @torch.derefine$of_unchecked_cast(%arg0: !torch.optional) -> !torch.optional { %0 = torch.prim.unchecked_cast %arg0 : !torch.optional -> !torch.int %1 = torch.derefine %0 : !torch.int to !torch.optional return %1 : !torch.optional } -// CHECK-LABEL: func @torch.derefine$use_allows_type_refinement( +// CHECK-LABEL: func.func @torch.derefine$use_allows_type_refinement( // CHECK-SAME: %{{.*}}: !torch.int) -> (!torch.vtensor, !torch.optional) { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[DEREFINED:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional @@ -968,7 +968,7 @@ func @torch.derefine$of_unchecked_cast(%arg0: !torch.optional) -> !torch.op // CHECK: %[[ARANGE:.*]] = torch.aten.arange.start %{{.*}}, %{{.*}}, %[[NONE]], %{{.*}}, %{{.*}}, %{{.*}} : !torch.int, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor // For the use that does not allow type refinement, don't replace. 
// CHECK: return %[[ARANGE]], %[[DEREFINED]] : !torch.vtensor, !torch.optional -func @torch.derefine$use_allows_type_refinement(%arg0: !torch.int) -> (!torch.vtensor, !torch.optional) { +func.func @torch.derefine$use_allows_type_refinement(%arg0: !torch.int) -> (!torch.vtensor, !torch.optional) { %none = torch.constant.none %optional = torch.derefine %none : !torch.none to !torch.optional %ret = torch.aten.arange.start %arg0, %arg0, %optional, %none, %none, %none: !torch.int, !torch.int, !torch.optional, !torch.none, !torch.none, !torch.none -> !torch.vtensor @@ -976,52 +976,52 @@ func @torch.derefine$use_allows_type_refinement(%arg0: !torch.int) -> (!torch.vt } -// CHECK-LABEL: func @torch.tensor_static_info_cast$downcast_first( +// CHECK-LABEL: func.func @torch.tensor_static_info_cast$downcast_first( // CHECK-SAME: %[[T:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: return %[[T]] : !torch.tensor -func @torch.tensor_static_info_cast$downcast_first(%t: !torch.tensor) -> !torch.tensor { +func.func @torch.tensor_static_info_cast$downcast_first(%t: !torch.tensor) -> !torch.tensor { %downcast = torch.tensor_static_info_cast %t : !torch.tensor to !torch.tensor<[?,?],f64> %upcast = torch.tensor_static_info_cast %downcast : !torch.tensor<[?,?],f64> to !torch.tensor return %upcast: !torch.tensor } -// CHECK-LABEL: func @torch.tensor_static_info_cast$upcast_first( +// CHECK-LABEL: func.func @torch.tensor_static_info_cast$upcast_first( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[?,?],f64>) -> !torch.tensor<[?,?],f64> { // CHECK: return %[[T]] : !torch.tensor<[?,?],f64> -func @torch.tensor_static_info_cast$upcast_first(%t: !torch.tensor<[?,?],f64>) -> !torch.tensor<[?,?],f64> { +func.func @torch.tensor_static_info_cast$upcast_first(%t: !torch.tensor<[?,?],f64>) -> !torch.tensor<[?,?],f64> { %upcast = torch.tensor_static_info_cast %t : !torch.tensor<[?,?],f64> to !torch.tensor %downcast = torch.tensor_static_info_cast %upcast : !torch.tensor to !torch.tensor<[?,?],f64> return %downcast: !torch.tensor<[?,?],f64> } -// CHECK-LABEL: func @torch.tensor_static_info_cast$refine( +// CHECK-LABEL: func.func @torch.tensor_static_info_cast$refine( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor { // CHECK-NEXT: %[[RESULT:.*]] = torch.aten.relu %[[ARG]] : !torch.vtensor<[],f32> -> !torch.vtensor // CHECK-NEXT: return %[[RESULT]] : !torch.vtensor -func @torch.tensor_static_info_cast$refine(%arg0: !torch.vtensor<[], f32>) -> !torch.vtensor { +func.func @torch.tensor_static_info_cast$refine(%arg0: !torch.vtensor<[], f32>) -> !torch.vtensor { %0 = torch.tensor_static_info_cast %arg0 : !torch.vtensor<[],f32> to !torch.vtensor %1 = torch.aten.relu %0 : !torch.vtensor -> !torch.vtensor return %1 : !torch.vtensor } -// CHECK-LABEL: func @torch.tensor_static_info_cast$no_refine( +// CHECK-LABEL: func.func @torch.tensor_static_info_cast$no_refine( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.vtensor to !torch.vtensor<[],f32> // CHECK: %[[RESULT:.*]] = torch.aten.relu %[[CAST]] : !torch.vtensor<[],f32> -> !torch.vtensor // CHECK: return %[[RESULT]] : !torch.vtensor -func @torch.tensor_static_info_cast$no_refine(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @torch.tensor_static_info_cast$no_refine(%arg0: !torch.vtensor) -> !torch.vtensor { %0 = torch.tensor_static_info_cast %arg0 : !torch.vtensor to !torch.vtensor<[],f32> %1 = torch.aten.relu %0 : !torch.vtensor<[],f32> -> !torch.vtensor return %1 : 
!torch.vtensor } -// CHECK-LABEL: func @torch.tensor_static_info_cast$refine_allowed_ops( +// CHECK-LABEL: func.func @torch.tensor_static_info_cast$refine_allowed_ops( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[],f32>) -> !torch.tuple { // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.vtensor<[],f32> to !torch.vtensor // CHECK: %[[RELU:.*]] = torch.aten.relu %[[ARG]] : !torch.vtensor<[],f32> -> !torch.vtensor // CHECK: %[[RESULT:.*]] = torch.prim.TupleConstruct %[[CAST]], %[[RELU]] : !torch.vtensor, !torch.vtensor -> !torch.tuple // CHECK: return %[[RESULT]] : !torch.tuple -func @torch.tensor_static_info_cast$refine_allowed_ops(%arg0: !torch.vtensor<[], f32>) -> !torch.tuple { +func.func @torch.tensor_static_info_cast$refine_allowed_ops(%arg0: !torch.vtensor<[], f32>) -> !torch.tuple { %0 = torch.tensor_static_info_cast %arg0 : !torch.vtensor<[],f32> to !torch.vtensor %1 = torch.aten.relu %0 : !torch.vtensor -> !torch.vtensor // prim.TupleConstruct does not allow type refinements @@ -1029,17 +1029,17 @@ func @torch.tensor_static_info_cast$refine_allowed_ops(%arg0: !torch.vtensor<[], return %2 : !torch.tuple } -// CHECK-LABEL: func @torch.prim.TupleIndex( +// CHECK-LABEL: func.func @torch.prim.TupleIndex( // CHECK-SAME: %[[T0:.*]]: !torch.tensor, %[[T1:.*]]: !torch.tensor, %[[T2:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: return %[[T1]] : !torch.tensor -func @torch.prim.TupleIndex(%t0: !torch.tensor, %t1: !torch.tensor, %t2: !torch.tensor) -> !torch.tensor { +func.func @torch.prim.TupleIndex(%t0: !torch.tensor, %t1: !torch.tensor, %t2: !torch.tensor) -> !torch.tensor { %0 = torch.prim.TupleConstruct %t0, %t1, %t2 : !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tuple %int1 = torch.constant.int 1 %1 = torch.prim.TupleIndex %0, %int1 : !torch.tuple, !torch.int -> !torch.tensor return %1 : !torch.tensor } -// CHECK-LABEL: func @torch.prim.TupleIndex$out_of_bound( +// CHECK-LABEL: func.func @torch.prim.TupleIndex$out_of_bound( // CHECK-SAME: %[[T0:.*]]: !torch.tensor, %[[T1:.*]]: !torch.tensor, %[[T2:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[INDEX3:.*]] = torch.constant.int 3 // CHECK: %[[TUPLE:.*]] = torch.prim.TupleConstruct %[[T0]], %[[T1]], %[[T2]] : @@ -1048,75 +1048,75 @@ func @torch.prim.TupleIndex(%t0: !torch.tensor, %t1: !torch.tensor, %t2: !torch. 
// CHECK: %[[RET:.*]] = torch.prim.TupleIndex %[[TUPLE]], %[[INDEX3]] : // CHECK-SAME: !torch.tuple, !torch.int -> !torch.tensor // CHECK: return %[[RET]] : !torch.tensor -func @torch.prim.TupleIndex$out_of_bound(%t0: !torch.tensor, %t1: !torch.tensor, %t2: !torch.tensor) -> !torch.tensor { +func.func @torch.prim.TupleIndex$out_of_bound(%t0: !torch.tensor, %t1: !torch.tensor, %t2: !torch.tensor) -> !torch.tensor { %0 = torch.prim.TupleConstruct %t0, %t1, %t2 : !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tuple %int3 = torch.constant.int 3 %1 = torch.prim.TupleIndex %0, %int3 : !torch.tuple, !torch.int -> !torch.tensor return %1 : !torch.tensor } -// CHECK-LABEL: func @torch.prim.TupleIndex$different_types$no_change( +// CHECK-LABEL: func.func @torch.prim.TupleIndex$different_types$no_change( // CHECK-SAME: %[[ARG0:.*]]: !torch.tensor<[1,768],f32>) -> !torch.tensor { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[TUPLE:.*]] = torch.prim.TupleConstruct %[[ARG0]] : !torch.tensor<[1,768],f32> -> !torch.tuple> // CHECK: %[[ELEMENT:.*]] = torch.prim.TupleIndex %[[TUPLE]], %[[INT0]] : !torch.tuple>, !torch.int -> !torch.tensor // CHECK: return %[[ELEMENT]] : !torch.tensor -func @torch.prim.TupleIndex$different_types$no_change(%arg0: !torch.tensor<[1,768],f32>) -> !torch.tensor { +func.func @torch.prim.TupleIndex$different_types$no_change(%arg0: !torch.tensor<[1,768],f32>) -> !torch.tensor { %int0 = torch.constant.int 0 %0 = torch.prim.TupleConstruct %arg0 : !torch.tensor<[1,768],f32> -> !torch.tuple> %1 = torch.prim.TupleIndex %0, %int0 : !torch.tuple>, !torch.int -> !torch.tensor return %1 : !torch.tensor } -// CHECK-LABEL: func @torch.prim.unchecked_cast$derefine +// CHECK-LABEL: func.func @torch.prim.unchecked_cast$derefine // CHECK-next: return %arg0 : !torch.list -func @torch.prim.unchecked_cast$derefine(%arg0: !torch.list) -> !torch.list { +func.func @torch.prim.unchecked_cast$derefine(%arg0: !torch.list) -> !torch.list { %0 = torch.derefine %arg0 : !torch.list to !torch.optional> %1 = torch.prim.unchecked_cast %0 : !torch.optional> -> !torch.list return %1 : !torch.list } -// CHECK-LABEL: func @torch.aten.Int.Tensor( +// CHECK-LABEL: func.func @torch.aten.Int.Tensor( // CHECK-SAME: %[[NUM:.*]]: !torch.int) -> !torch.int { // CHECK: %[[T:.*]] = torch.prim.NumToTensor.Scalar %[[NUM]] : !torch.int -> !torch.vtensor<[],si64> // CHECK: return %[[NUM]] : !torch.int -func @torch.aten.Int.Tensor(%arg0: !torch.int) -> !torch.int { +func.func @torch.aten.Int.Tensor(%arg0: !torch.int) -> !torch.int { %tensor = torch.prim.NumToTensor.Scalar %arg0: !torch.int -> !torch.vtensor<[],si64> %scalar = torch.aten.Int.Tensor %tensor : !torch.vtensor<[],si64> -> !torch.int return %scalar : !torch.int } -// CHECK-LABEL: func @torch.aten.Float.Tensor( +// CHECK-LABEL: func.func @torch.aten.Float.Tensor( // CHECK-SAME: %[[NUM:.*]]: !torch.float) -> !torch.float { // CHECK: %[[T:.*]] = torch.prim.NumToTensor.Scalar %[[NUM]] : !torch.float -> !torch.vtensor<[],f64> // CHECK: return %[[NUM]] : !torch.float -func @torch.aten.Float.Tensor(%arg0: !torch.float) -> !torch.float { +func.func @torch.aten.Float.Tensor(%arg0: !torch.float) -> !torch.float { %tensor = torch.prim.NumToTensor.Scalar %arg0: !torch.float -> !torch.vtensor<[],f64> %scalar = torch.aten.Float.Tensor %tensor : !torch.vtensor<[],f64> -> !torch.float return %scalar : !torch.float } -// CHECK-LABEL: func @torch.aten.squeeze$zero_rank( +// CHECK-LABEL: func.func @torch.aten.squeeze$zero_rank( // CHECK-SAME: %[[ARG:.*]]: 
!torch.tensor<[],f32>) -> !torch.tensor<[],f32> { // CHECK-NEXT: return %[[ARG]] : !torch.tensor<[],f32> -func @torch.aten.squeeze$zero_rank(%arg0: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { +func.func @torch.aten.squeeze$zero_rank(%arg0: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { %0 = torch.aten.squeeze %arg0 : !torch.tensor<[],f32> -> !torch.tensor<[],f32> return %0 : !torch.tensor<[],f32> } -// CHECK-LABEL: func @torch.aten.squeeze.dim$zero_rank( +// CHECK-LABEL: func.func @torch.aten.squeeze.dim$zero_rank( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { // CHECK-NEXT: return %[[ARG]] : !torch.tensor<[],f32> -func @torch.aten.squeeze.dim$zero_rank(%arg0: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { +func.func @torch.aten.squeeze.dim$zero_rank(%arg0: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { %int0 = torch.constant.int 0 %0 = torch.aten.squeeze.dim %arg0, %int0 : !torch.tensor<[],f32>, !torch.int -> !torch.tensor<[],f32> return %0 : !torch.tensor<[],f32> } -// CHECK-LABEL: func @torch.aten.to.dtype$same_dtype( +// CHECK-LABEL: func.func @torch.aten.to.dtype$same_dtype( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor<*,f32>) -> !torch.tensor<*,f32> { // CHECK-NEXT: return %[[ARG]] : !torch.tensor<*,f32> -func @torch.aten.to.dtype$same_dtype(%arg0: !torch.tensor<*,f32>) -> !torch.tensor<*,f32> { +func.func @torch.aten.to.dtype$same_dtype(%arg0: !torch.tensor<*,f32>) -> !torch.tensor<*,f32> { %none = torch.constant.none %false = torch.constant.bool false %int6 = torch.constant.int 6 @@ -1124,11 +1124,11 @@ func @torch.aten.to.dtype$same_dtype(%arg0: !torch.tensor<*,f32>) -> !torch.tens return %0 : !torch.tensor<*,f32> } -// CHECK-LABEL: func @torch.aten.to.dtype$no_fold$unk_dtype( +// CHECK-LABEL: func.func @torch.aten.to.dtype$no_fold$unk_dtype( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[RESULT:.*]] = torch.aten.to.dtype %[[ARG]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !torch.tensor, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor // CHECK: return %[[RESULT]] : !torch.tensor -func @torch.aten.to.dtype$no_fold$unk_dtype(%arg0: !torch.tensor) -> !torch.tensor { +func.func @torch.aten.to.dtype$no_fold$unk_dtype(%arg0: !torch.tensor) -> !torch.tensor { %none = torch.constant.none %false = torch.constant.bool false %int6 = torch.constant.int 6 @@ -1136,50 +1136,50 @@ func @torch.aten.to.dtype$no_fold$unk_dtype(%arg0: !torch.tensor) -> !torch.tens return %0 : !torch.tensor } -// CHECK-LABEL: func @torch.aten.view$1D( +// CHECK-LABEL: func.func @torch.aten.view$1D( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor<[?],f32>) -> !torch.tensor<[?],f32> { // CHECK-NEXT: return %[[ARG]] : !torch.tensor<[?],f32> -func @torch.aten.view$1D(%arg0: !torch.tensor<[?],f32>) -> !torch.tensor<[?],f32> { +func.func @torch.aten.view$1D(%arg0: !torch.tensor<[?],f32>) -> !torch.tensor<[?],f32> { %int-1 = torch.constant.int -1 %0 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list %1 = torch.aten.view %arg0, %0 : !torch.tensor<[?],f32>, !torch.list -> !torch.tensor<[?],f32> return %1 : !torch.tensor<[?],f32> } -// CHECK-LABEL: func @torch.aten.div.float$fold_zero_dividend( +// CHECK-LABEL: func.func @torch.aten.div.float$fold_zero_dividend( // CHECK: %[[CST0:.*]] = torch.constant.float 0.000000e+00 // CHECK: return %[[CST0]] : !torch.float -func @torch.aten.div.float$fold_zero_dividend() -> !torch.float { +func.func @torch.aten.div.float$fold_zero_dividend() -> !torch.float { %float0 = torch.constant.float 0.0 
%float5 = torch.constant.float 5.0 %0 = torch.aten.div.float %float0, %float5 : !torch.float, !torch.float -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @torch.aten.div.float$fold_one_divisor( +// CHECK-LABEL: func.func @torch.aten.div.float$fold_one_divisor( // CHECK: %[[CST4:.*]] = torch.constant.float 4.000000e+00 // CHECK: return %[[CST4]] : !torch.float -func @torch.aten.div.float$fold_one_divisor() -> !torch.float { +func.func @torch.aten.div.float$fold_one_divisor() -> !torch.float { %float4 = torch.constant.float 4.0 %float1 = torch.constant.float 1.0 %0 = torch.aten.div.float %float4, %float1 : !torch.float, !torch.float -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @torch.aten.div.float$fold_cst_operands( +// CHECK-LABEL: func.func @torch.aten.div.float$fold_cst_operands( // CHECK: %[[CST2:.*]] = torch.constant.float 2.000000e+00 // CHECK: return %[[CST2]] : !torch.float -func @torch.aten.div.float$fold_cst_operands() -> !torch.float { +func.func @torch.aten.div.float$fold_cst_operands() -> !torch.float { %float4 = torch.constant.float 4.0 %float2 = torch.constant.float 2.0 %0 = torch.aten.div.float %float4, %float2 : !torch.float, !torch.float -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func @torch.aten.to.dtype_layout$same_dtype( +// CHECK-LABEL: func.func @torch.aten.to.dtype_layout$same_dtype( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor<[?,?],f32>) -> !torch.tensor<[?,?],f32> { // CHECK-NEXT: return %[[ARG]] : !torch.tensor<[?,?],f32> -func @torch.aten.to.dtype_layout$same_dtype(%arg0: !torch.tensor<[?,?],f32>) -> !torch.tensor<[?,?],f32> { +func.func @torch.aten.to.dtype_layout$same_dtype(%arg0: !torch.tensor<[?,?],f32>) -> !torch.tensor<[?,?],f32> { %none = torch.constant.none %false = torch.constant.bool false %int6 = torch.constant.int 6 @@ -1187,49 +1187,49 @@ func @torch.aten.to.dtype_layout$same_dtype(%arg0: !torch.tensor<[?,?],f32>) -> return %0 : !torch.tensor<[?,?],f32> } -// CHECK-LABEL: func @torch.aten.ge.float$same_operand( +// CHECK-LABEL: func.func @torch.aten.ge.float$same_operand( // CHECK-SAME: %{{.*}}: !torch.float) -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.ge.float$same_operand(%arg0: !torch.float) -> !torch.bool { +func.func @torch.aten.ge.float$same_operand(%arg0: !torch.float) -> !torch.bool { %2 = torch.aten.ge.float %arg0, %arg0: !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ge.float$same_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ge.float$same_value() -> !torch.bool { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: return %[[TRUE]] : !torch.bool -func @torch.aten.ge.float$same_value() -> !torch.bool { +func.func @torch.aten.ge.float$same_value() -> !torch.bool { %float4 = torch.constant.float 4.0 %float4_0 = torch.constant.float 4.0 %2 = torch.aten.ge.float %float4, %float4_0: !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ge.float$different_value() -> !torch.bool { +// CHECK-LABEL: func.func @torch.aten.ge.float$different_value() -> !torch.bool { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: return %[[FALSE]] : !torch.bool -func @torch.aten.ge.float$different_value() -> !torch.bool { +func.func @torch.aten.ge.float$different_value() -> !torch.bool { %float4 = torch.constant.float 4.0 %float4_0 = torch.constant.float 5.0 %2 = torch.aten.ge.float %float4, 
%float4_0: !torch.float, !torch.float -> !torch.bool return %2 : !torch.bool } -// CHECK-LABEL: func @torch.aten.ceil.float$fold_cst() -> !torch.int { +// CHECK-LABEL: func.func @torch.aten.ceil.float$fold_cst() -> !torch.int { // CHECK: %[[CST2:.*]] = torch.constant.int 2 // CHECK: return %[[CST2]] : !torch.int -func @torch.aten.ceil.float$fold_cst() -> !torch.int { +func.func @torch.aten.ceil.float$fold_cst() -> !torch.int { %float = torch.constant.float 1.5 %1 = torch.aten.ceil.float %float : !torch.float -> !torch.int return %1 : !torch.int } -// CHECK-LABEL: func @torch.aten.ceil.float$no_fold( +// CHECK-LABEL: func.func @torch.aten.ceil.float$no_fold( // CHECK-SAME: %[[ARG:.*]]: !torch.float) -> !torch.int { // CHECK: %[[RESULT:.*]] = torch.aten.ceil.float %[[ARG]] : !torch.float -> !torch.int // CHECK: return %[[RESULT]] : !torch.int -func @torch.aten.ceil.float$no_fold(%arg0 : !torch.float) -> !torch.int { +func.func @torch.aten.ceil.float$no_fold(%arg0 : !torch.float) -> !torch.int { %1 = torch.aten.ceil.float %arg0 : !torch.float -> !torch.int return %1 : !torch.int } diff --git a/test/Dialect/Torch/decompose-complex-ops.mlir b/test/Dialect/Torch/decompose-complex-ops.mlir index c4a494ec8..c2f69eeaf 100644 --- a/test/Dialect/Torch/decompose-complex-ops.mlir +++ b/test/Dialect/Torch/decompose-complex-ops.mlir @@ -1,8 +1,8 @@ // RUN: torch-mlir-opt -torch-decompose-complex-ops -split-input-file %s | FileCheck %s -// CHECK-LABEL: func @matmul_no_decompose +// CHECK-LABEL: func.func @matmul_no_decompose // CHECK: torch.aten.matmul %arg0, %arg1 : !torch.vtensor<[?,?,?,?,?],f32>, !torch.vtensor<[?,?,?],f32> -> !torch.tensor -func @matmul_no_decompose(%arg0: !torch.vtensor<[?,?,?,?,?],f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { +func.func @matmul_no_decompose(%arg0: !torch.vtensor<[?,?,?,?,?],f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { %0 = torch.aten.matmul %arg0, %arg1 : !torch.vtensor<[?,?,?,?,?],f32>, !torch.vtensor<[?,?,?],f32> -> !torch.tensor return %0 : !torch.tensor } @@ -10,23 +10,23 @@ func @matmul_no_decompose(%arg0: !torch.vtensor<[?,?,?,?,?],f32>, %arg1: !torch. 
// ----- -// CHECK-LABEL: func @matmul_decompose_2d +// CHECK-LABEL: func.func @matmul_decompose_2d // CHECK: torch.aten.mm %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.tensor -func @matmul_decompose_2d(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.tensor { +func.func @matmul_decompose_2d(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) -> !torch.tensor { %0 = torch.aten.matmul %arg0, %arg1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.tensor return %0 : !torch.tensor } // ----- -// CHECK-LABEL: func @matmul_decompose_3d( +// CHECK-LABEL: func.func @matmul_decompose_3d( // CHECK: torch.aten.bmm %arg0, %arg1 : !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,?],f32> -> !torch.tensor -func @matmul_decompose_3d(%arg0: !torch.vtensor<[?,?,?],f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { +func.func @matmul_decompose_3d(%arg0: !torch.vtensor<[?,?,?],f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { %0 = torch.aten.matmul %arg0, %arg1 : !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,?],f32> -> !torch.tensor return %0 : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.softmax.int( +// CHECK-LABEL: func.func @torch.aten.softmax.int( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>, // CHECK-SAME: %[[DIM:.*]]: !torch.int) -> !torch.tensor<[2,3],f32> { // CHECK: %[[DTYPE:.*]] = torch.constant.none @@ -45,7 +45,7 @@ func @matmul_decompose_3d(%arg0: !torch.vtensor<[?,?,?],f32>, %arg1: !torch.vten // CHECK: %[[SOFTMAX:.*]] = torch.aten.div.Tensor %[[EXP]], %[[SUM]] : !torch.tensor<[2,3],f32>, !torch.tensor<[?,?],f32> -> !torch.tensor<[2,3],f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[SOFTMAX]] : !torch.tensor<[2,3],f32> to !torch.tensor<[2,3],f32> // CHECK: return %[[RET]] : !torch.tensor<[2,3],f32> -func @torch.aten.softmax.int(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> !torch.tensor<[2,3],f32> { +func.func @torch.aten.softmax.int(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> !torch.tensor<[2,3],f32> { %dtype = torch.constant.none %ret = torch.aten.softmax.int %t, %dim, %dtype: !torch.tensor<[2,3],f32>, !torch.int, !torch.none -> !torch.tensor<[2,3],f32> return %ret : !torch.tensor<[2,3],f32> @@ -53,7 +53,7 @@ func @torch.aten.softmax.int(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> // ----- -// CHECK-LABEL: func @torch.aten.softmax.int$cst_dim( +// CHECK-LABEL: func.func @torch.aten.softmax.int$cst_dim( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>) -> !torch.tensor<[2,3],f32> { // CHECK: %[[DTYPE:.*]] = torch.constant.none // CHECK: %[[DIM:.*]] = torch.constant.int 1 @@ -72,7 +72,7 @@ func @torch.aten.softmax.int(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> // CHECK: %[[SOFTMAX:.*]] = torch.aten.div.Tensor %[[EXP]], %[[SUM]] : !torch.tensor<[2,3],f32>, !torch.tensor<[2,1],f32> -> !torch.tensor<[2,3],f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[SOFTMAX]] : !torch.tensor<[2,3],f32> to !torch.tensor<[2,3],f32> // CHECK: return %[[RET]] : !torch.tensor<[2,3],f32> -func @torch.aten.softmax.int$cst_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.tensor<[2,3],f32> { +func.func @torch.aten.softmax.int$cst_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.tensor<[2,3],f32> { %none = torch.constant.none %dim = torch.constant.int 1 %ret = torch.aten.softmax.int %t, %dim, %none : !torch.tensor<[2,3],f32>, !torch.int, !torch.none -> !torch.tensor<[2,3],f32> @@ -80,7 +80,7 @@ func @torch.aten.softmax.int$cst_dim(%t: 
!torch.tensor<[2,3],f32>) -> !torch.ten } // ----- -// CHECK-LABEL: func @torch.aten.softmax.int$dyn_shape( +// CHECK-LABEL: func.func @torch.aten.softmax.int$dyn_shape( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[?,?],f32>) -> !torch.tensor<[?,?],f32> { // CHECK: %[[DTYPE:.*]] = torch.constant.none // CHECK: %[[DIM:.*]] = torch.constant.int 1 @@ -99,7 +99,7 @@ func @torch.aten.softmax.int$cst_dim(%t: !torch.tensor<[2,3],f32>) -> !torch.ten // CHECK: %[[SOFTMAX:.*]] = torch.aten.div.Tensor %[[EXP]], %[[SUM]] : !torch.tensor<[?,?],f32>, !torch.tensor<[?,1],f32> -> !torch.tensor<[?,?],f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[SOFTMAX]] : !torch.tensor<[?,?],f32> to !torch.tensor<[?,?],f32> // CHECK: return %[[RET]] : !torch.tensor<[?,?],f32> -func @torch.aten.softmax.int$dyn_shape(%t: !torch.tensor<[?,?],f32>) -> !torch.tensor<[?,?],f32> { +func.func @torch.aten.softmax.int$dyn_shape(%t: !torch.tensor<[?,?],f32>) -> !torch.tensor<[?,?],f32> { %none = torch.constant.none %dim = torch.constant.int 1 %ret = torch.aten.softmax.int %t, %dim, %none : !torch.tensor<[?,?],f32>, !torch.int, !torch.none -> !torch.tensor<[?,?],f32> @@ -107,7 +107,7 @@ func @torch.aten.softmax.int$dyn_shape(%t: !torch.tensor<[?,?],f32>) -> !torch.t } // ----- -// CHECK-LABEL: func @torch.aten.softmax.int$unknown_shape( +// CHECK-LABEL: func.func @torch.aten.softmax.int$unknown_shape( // CHECK-SAME: %[[T:.*]]: !torch.tensor<*,f32>) -> !torch.tensor<*,f32> { // CHECK: %[[DTYPE:.*]] = torch.constant.none // CHECK: %[[DIM:.*]] = torch.constant.int 1 @@ -126,7 +126,7 @@ func @torch.aten.softmax.int$dyn_shape(%t: !torch.tensor<[?,?],f32>) -> !torch.t // CHECK: %[[SOFTMAX:.*]] = torch.aten.div.Tensor %[[EXP]], %[[SUM]] : !torch.tensor<*,f32>, !torch.tensor<*,f32> -> !torch.tensor<*,f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[SOFTMAX]] : !torch.tensor<*,f32> to !torch.tensor<*,f32> // CHECK: return %[[RET]] : !torch.tensor<*,f32> -func @torch.aten.softmax.int$unknown_shape(%t: !torch.tensor<*,f32>) -> !torch.tensor<*,f32> { +func.func @torch.aten.softmax.int$unknown_shape(%t: !torch.tensor<*,f32>) -> !torch.tensor<*,f32> { %none = torch.constant.none %dim = torch.constant.int 1 %ret = torch.aten.softmax.int %t, %dim, %none : !torch.tensor<*,f32>, !torch.int, !torch.none -> !torch.tensor<*,f32> @@ -134,7 +134,7 @@ func @torch.aten.softmax.int$unknown_shape(%t: !torch.tensor<*,f32>) -> !torch.t } // ----- -// CHECK-LABEL: func @torch.aten.size( +// CHECK-LABEL: func.func @torch.aten.size( // CHECK-SAME: %[[T:.*]]: !torch.vtensor<[?,3],f32>) -> !torch.list { // CHECK: %[[CST0:.*]] = torch.constant.int 0 // CHECK: %[[DIM0:.*]] = torch.aten.size.int %[[T]], %[[CST0]] : !torch.vtensor<[?,3],f32>, !torch.int -> !torch.int @@ -142,13 +142,13 @@ func @torch.aten.softmax.int$unknown_shape(%t: !torch.tensor<*,f32>) -> !torch.t // CHECK: %[[DIM1:.*]] = torch.aten.size.int %[[T]], %[[CST1]] : !torch.vtensor<[?,3],f32>, !torch.int -> !torch.int // CHECK: %[[SIZE:.*]] = torch.prim.ListConstruct %[[DIM0]], %[[DIM1]] : (!torch.int, !torch.int) -> !torch.list // CHECK: return %[[SIZE]] : !torch.list -func @torch.aten.size(%arg0: !torch.vtensor<[?,3],f32>) -> !torch.list { +func.func @torch.aten.size(%arg0: !torch.vtensor<[?,3],f32>) -> !torch.list { %0 = torch.aten.size %arg0 : !torch.vtensor<[?,3],f32> -> !torch.list return %0 : !torch.list } // ----- -// CHECK-LABEL: func @torch.aten.arange() -> !torch.vtensor<[?],si64> { +// CHECK-LABEL: func.func @torch.aten.arange() -> !torch.vtensor<[?],si64> { // CHECK: 
%[[CST5:.*]] = torch.constant.int 5 // CHECK: %[[CSTN:.*]] = torch.constant.none // CHECK: %[[CST0:.*]] = torch.constant.int 0 @@ -156,7 +156,7 @@ func @torch.aten.size(%arg0: !torch.vtensor<[?,3],f32>) -> !torch.list { // CHECK: %[[RESULT:.*]] = torch.aten.arange.start_step %[[CST0]], %[[CST5]], %[[CST1]], %[[CSTN]], %[[CSTN]], %[[CSTN]], %[[CSTN]] : // CHECK-SAME: !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?],si64> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],si64> -func @torch.aten.arange() -> !torch.vtensor<[?],si64> { +func.func @torch.aten.arange() -> !torch.vtensor<[?],si64> { %int5 = torch.constant.int 5 %none = torch.constant.none %0 = torch.aten.arange %int5, %none, %none, %none, %none : !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?],si64> @@ -164,7 +164,7 @@ func @torch.aten.arange() -> !torch.vtensor<[?],si64> { } // ----- -// CHECK-LABEL: func @torch.aten.arange.start() -> !torch.vtensor<[?],si64> { +// CHECK-LABEL: func.func @torch.aten.arange.start() -> !torch.vtensor<[?],si64> { // CHECK: %[[CST10:.*]] = torch.constant.int 10 // CHECK: %[[CST0:.*]] = torch.constant.int 0 // CHECK: %[[CSTN:.*]] = torch.constant.none @@ -172,7 +172,7 @@ func @torch.aten.arange() -> !torch.vtensor<[?],si64> { // CHECK: %[[RESULT:.*]] = torch.aten.arange.start_step %[[CST0]], %[[CST10]], %[[CST1]], %[[CSTN]], %[[CSTN]], %[[CSTN]], %[[CSTN]] : // CHECK-SAME: !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?],si64> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],si64> -func @torch.aten.arange.start() -> !torch.vtensor<[?],si64> { +func.func @torch.aten.arange.start() -> !torch.vtensor<[?],si64> { %int10 = torch.constant.int 10 %int0 = torch.constant.int 0 %none = torch.constant.none @@ -181,14 +181,14 @@ func @torch.aten.arange.start() -> !torch.vtensor<[?],si64> { } // ----- -// CHECK-LABEL: func @torch.aten.argmax( +// CHECK-LABEL: func.func @torch.aten.argmax( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[1,?],si64> { // CHECK: %[[CST0:.*]] = torch.constant.int 0 // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: %[[VAL:.*]], %[[IND:.*]] = torch.aten.max.dim %[[INP]], %[[CST0]], %[[TRUE]] : // CHECK-SAME: !torch.vtensor<[?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,?],f32>, !torch.vtensor<[1,?],si64> // CHECK: return %[[IND]] : !torch.vtensor<[1,?],si64> -func @torch.aten.argmax(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[1,?],si64> { +func.func @torch.aten.argmax(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[1,?],si64> { %int0 = torch.constant.int 0 %true = torch.constant.bool true %0 = torch.aten.argmax %arg0, %int0, %true : !torch.vtensor<[?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,?],si64> @@ -196,7 +196,7 @@ func @torch.aten.argmax(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[1,? } // ----- -// CHECK-LABEL: func @torch.aten.argmax$reduceall( +// CHECK-LABEL: func.func @torch.aten.argmax$reduceall( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[],si64> { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -207,7 +207,7 @@ func @torch.aten.argmax(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[1,? 
// CHECK: %[[VAL:.*]], %[[IND:.*]] = torch.aten.max.dim %[[FLATTEN]], %[[CST0]], %[[FALSE]] : // CHECK-SAME: !torch.vtensor<[?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[],f32>, !torch.vtensor<[],si64> // CHECK: return %[[IND]] : !torch.vtensor<[],si64> -func @torch.aten.argmax$reduceall(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[],si64> { +func.func @torch.aten.argmax$reduceall(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[],si64> { %none = torch.constant.none %false = torch.constant.bool false %0 = torch.aten.argmax %arg0, %none, %false : !torch.vtensor<[?,?],f32>, !torch.none, !torch.bool -> !torch.vtensor<[],si64> @@ -215,18 +215,18 @@ func @torch.aten.argmax$reduceall(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vt } // ----- -// CHECK-LABEL: func @torch.aten.square( +// CHECK-LABEL: func.func @torch.aten.square( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[SQUARE:.*]] = torch.aten.mul.Tensor %[[INPUT]], %[[INPUT]] : // CHECK-SAME: !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,?],f32> -> !torch.vtensor<[?,?,?],f32> // CHECK: return %[[SQUARE]] : !torch.vtensor<[?,?,?],f32> -func @torch.aten.square(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { +func.func @torch.aten.square(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { %0 = torch.aten.square %arg0 : !torch.vtensor<[?,?,?],f32> -> !torch.vtensor<[?,?,?],f32> return %0 : !torch.vtensor<[?,?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.var$unbiased( +// CHECK-LABEL: func.func @torch.aten.var$unbiased( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { // CHECK: %[[UNBIASED:.*]] = torch.constant.bool true // CHECK: %[[DTYPE:.*]] = torch.constant.none @@ -242,14 +242,14 @@ func @torch.aten.square(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[? 
// CHECK: %[[NUM_ELEMENTS_SUB1:.*]] = torch.aten.sub.int %[[SUB_MEAN_SQUARE_NUM_ELEMENTS]], %[[CST1]] : !torch.int, !torch.int -> !torch.int // CHECK: %[[UNBIASED_VAR:.*]] = torch.aten.div.Scalar %[[SUB_MEAN_SQUARE_SUM]], %[[NUM_ELEMENTS_SUB1]] : !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[],f32> // CHECK: return %[[UNBIASED_VAR]] : !torch.vtensor<[],f32> -func @torch.aten.var$unbiased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { +func.func @torch.aten.var$unbiased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { %true = torch.constant.bool true %0 = torch.aten.var %arg0, %true: !torch.vtensor<[?,?,?],f32>, !torch.bool -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } // ----- -// CHECK-LABEL: func @torch.aten.var$biased( +// CHECK-LABEL: func.func @torch.aten.var$biased( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { // CHECK: %[[UNBIASED:.*]] = torch.constant.bool false // CHECK: %[[DTYPE:.*]] = torch.constant.none @@ -263,14 +263,14 @@ func @torch.aten.var$unbiased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vten // CHECK: %[[SUB_MEAN_SQUARE_NUM_ELEMENTS:.*]] = torch.aten.numel %[[SUB_MEAN_SQUARE]] : !torch.vtensor<[?,?,?],f32> -> !torch.int // CHECK: %[[BIASED_VAR:.*]] = torch.aten.div.Scalar %[[SUB_MEAN_SQUARE_SUM]], %[[SUB_MEAN_SQUARE_NUM_ELEMENTS]] : !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[],f32> // CHECK: return %[[BIASED_VAR]] : !torch.vtensor<[],f32> -func @torch.aten.var$biased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { +func.func @torch.aten.var$biased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { %false = torch.constant.bool false %0 = torch.aten.var %arg0, %false: !torch.vtensor<[?,?,?],f32>, !torch.bool -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } // ----- -// CHECK-LABEL: func @torch.aten.std$unbiased( +// CHECK-LABEL: func.func @torch.aten.std$unbiased( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { // CHECK: %[[UNBIASED:.*]] = torch.constant.bool true // CHECK: %[[DTYPE:.*]] = torch.constant.none @@ -287,14 +287,14 @@ func @torch.aten.var$biased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtenso // CHECK: %[[UNBIASED_VAR:.*]] = torch.aten.div.Scalar %[[SUB_MEAN_SQUARE_SUM]], %[[NUM_ELEMENTS_SUB1]] : !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[],f32> // CHECK: %[[UNBIASED_STD:.*]] = torch.aten.sqrt %[[UNBIASED_VAR]] : !torch.vtensor<[],f32> -> !torch.vtensor<[],f32> // CHECK: return %[[UNBIASED_STD]] : !torch.vtensor<[],f32> -func @torch.aten.std$unbiased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { +func.func @torch.aten.std$unbiased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { %true = torch.constant.bool true %0 = torch.aten.std %arg0, %true: !torch.vtensor<[?,?,?],f32>, !torch.bool -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } // ----- -// CHECK-LABEL: func @torch.aten.std$biased( +// CHECK-LABEL: func.func @torch.aten.std$biased( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { // CHECK: %[[UNBIASED:.*]] = torch.constant.bool false // CHECK: %[[DTYPE:.*]] = torch.constant.none @@ -309,20 +309,20 @@ func @torch.aten.std$unbiased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vten // CHECK: %[[BIASED_VAR:.*]] = torch.aten.div.Scalar %[[SUB_MEAN_SQUARE_SUM]], %[[SUB_MEAN_SQUARE_NUM_ELEMENTS]] : !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[],f32> // CHECK: 
%[[BIASED_STD:.*]] = torch.aten.sqrt %[[BIASED_VAR]] : !torch.vtensor<[],f32> -> !torch.vtensor<[],f32> // CHECK: return %[[BIASED_STD]] : !torch.vtensor<[],f32> -func @torch.aten.std$biased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { +func.func @torch.aten.std$biased(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[],f32> { %false = torch.constant.bool false %0 = torch.aten.std %arg0, %false: !torch.vtensor<[?,?,?],f32>, !torch.bool -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } // ----- -// CHECK-LABEL: func @torch.aten._unsafe_view$static +// CHECK-LABEL: func.func @torch.aten._unsafe_view$static // CHECK-SAME: (%[[ARG0:.*]]: !torch.vtensor<[1,512,32],f32>) // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct // CHECK-NOT: torch.aten._unsafe_view // CHECK-NEXT: %[[RES:.*]] = torch.aten.view %[[ARG0]], %[[LIST]] // CHECK-NEXT: return -func @torch.aten._unsafe_view$static(%arg0: !torch.vtensor<[1,512,32],f32>) -> !torch.vtensor<[1,2,256,32],f32> { +func.func @torch.aten._unsafe_view$static(%arg0: !torch.vtensor<[1,512,32],f32>) -> !torch.vtensor<[1,2,256,32],f32> { %c1 = torch.constant.int 1 %c2 = torch.constant.int 2 %c256 = torch.constant.int 256 @@ -333,14 +333,14 @@ func @torch.aten._unsafe_view$static(%arg0: !torch.vtensor<[1,512,32],f32>) -> ! } // ----- -// CHECK-LABEL: func @torch.aten._reshape_alias$static +// CHECK-LABEL: func.func @torch.aten._reshape_alias$static // CHECK-SAME: (%[[ARG0:.*]]: !torch.vtensor<[1],f32>) // CHECK: %[[LIST1:.*]] = torch.prim.ListConstruct // CHECK: %[[LIST2:.*]] = torch.prim.ListConstruct // CHECK-NOT: torch.aten._reshape_alias // CHECK-NEXT: %[[RES:.*]] = torch.aten.view %[[ARG0]], %[[LIST1]] // CHECK-NEXT: return -func @torch.aten._reshape_alias$static(%arg0: !torch.vtensor<[1],f32>) -> !torch.vtensor<[12,32],f32> { +func.func @torch.aten._reshape_alias$static(%arg0: !torch.vtensor<[1],f32>) -> !torch.vtensor<[12,32],f32> { %int1 = torch.constant.int 1 %int32 = torch.constant.int 32 %int12 = torch.constant.int 12 @@ -351,13 +351,13 @@ func @torch.aten._reshape_alias$static(%arg0: !torch.vtensor<[1],f32>) -> !torch } // ----- -// CHECK-LABEL: func @torch.aten._unsafe_view$dynamic +// CHECK-LABEL: func.func @torch.aten._unsafe_view$dynamic // CHECK-SAME: (%[[ARG0:.*]]: !torch.vtensor<[?,?,?],f32>) // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct // CHECK-NOT: torch.aten._unsafe_view // CHECK-NEXT: %[[RES:.*]] = torch.aten.view %[[ARG0]], %[[LIST]] // CHECK-NEXT: return -func @torch.aten._unsafe_view$dynamic(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[512,32],f32> { +func.func @torch.aten._unsafe_view$dynamic(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[512,32],f32> { %c256 = torch.constant.int 512 %c32 = torch.constant.int 32 %0 = torch.prim.ListConstruct %c256, %c32 : (!torch.int, !torch.int) -> !torch.list @@ -366,7 +366,7 @@ func @torch.aten._unsafe_view$dynamic(%arg0: !torch.vtensor<[?,?,?],f32>) -> !to } // ----- -// CHECK-LABEL: func @torch.aten._log_softmax( +// CHECK-LABEL: func.func @torch.aten._log_softmax( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -386,7 +386,7 @@ func @torch.aten._unsafe_view$dynamic(%arg0: !torch.vtensor<[?,?,?],f32>) -> !to // CHECK: %[[SUB1:.*]] = torch.aten.sub.Tensor %[[SUB]], %[[LOG]], %[[FLOAT_1]] : !torch.vtensor<[?,?,?],f32>, // CHECK-SAME: !torch.vtensor<[1,?,?],f32>, !torch.float -> 
!torch.vtensor<[?,?,?],f32> // CHECK: return %[[SUB1]] : !torch.vtensor<[?,?,?],f32> -func @torch.aten._log_softmax(%arg0: !torch.vtensor<[?,?,?],f32> loc(unknown)) -> !torch.vtensor<[?,?,?],f32> { +func.func @torch.aten._log_softmax(%arg0: !torch.vtensor<[?,?,?],f32> loc(unknown)) -> !torch.vtensor<[?,?,?],f32> { %int0 = torch.constant.int 0 %false = torch.constant.bool false %0 = torch.aten._log_softmax %arg0, %int0, %false : !torch.vtensor<[?,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[?,?,?],f32> @@ -394,7 +394,7 @@ func @torch.aten._log_softmax(%arg0: !torch.vtensor<[?,?,?],f32> loc(unknown)) - } // ----- -// CHECK-LABEL: func @torch.aten.bernoulli +// CHECK-LABEL: func.func @torch.aten.bernoulli // CHECK-SAME: (%[[INP:.*]]: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[INT7:.*]] = torch.constant.int 7 @@ -424,7 +424,7 @@ func @torch.aten._log_softmax(%arg0: !torch.vtensor<[?,?,?],f32> loc(unknown)) - // CHECK-SAME: !torch.vtensor<[?,?,?],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?],f64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[TODTYPE]] : !torch.vtensor<[?,?,?],f64> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @torch.aten.bernoulli(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { +func.func @torch.aten.bernoulli(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { %none = torch.constant.none %0 = torch.aten.bernoulli %arg0, %none : !torch.vtensor<[?,?,?],f64>, !torch.none -> !torch.vtensor<[?,?,?],f64> %1 = torch.tensor_static_info_cast %0 : !torch.vtensor<[?,?,?],f64> to !torch.vtensor @@ -432,7 +432,7 @@ func @torch.aten.bernoulli(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor } // ----- -// CHECK-LABEL: func @torch.valsem.aten.bernoulli.float +// CHECK-LABEL: func.func @torch.valsem.aten.bernoulli.float // CHECK-SAME: (%[[INP:.*]]: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[PROB:.*]] = torch.constant.float 4.000000e-01 @@ -463,7 +463,7 @@ func @torch.aten.bernoulli(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor // CHECK-SAME: !torch.vtensor<[?,?,?],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?],f64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[TODTYPE]] : !torch.vtensor<[?,?,?],f64> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @torch.valsem.aten.bernoulli.float(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { +func.func @torch.valsem.aten.bernoulli.float(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { %none = torch.constant.none %prob = torch.constant.float 4.000000e-01 %0 = torch.valsem.aten.bernoulli.float %arg0, %prob, %none : !torch.vtensor<[?,?,?],f64>, !torch.float, !torch.none -> !torch.vtensor<[?,?,?],f64> @@ -472,7 +472,7 @@ func @torch.valsem.aten.bernoulli.float(%arg0: !torch.vtensor<[?,?,?],f64>) -> ! } // ----- -// CHECK-LABEL: func @torch.valsem.aten.bernoulli.Tensor( +// CHECK-LABEL: func.func @torch.valsem.aten.bernoulli.Tensor( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?,?],f64>, // CHECK-SAME: %[[PROB:.*]]: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { // CHECK: %[[NONE:.*]] = torch.constant.none @@ -502,7 +502,7 @@ func @torch.valsem.aten.bernoulli.float(%arg0: !torch.vtensor<[?,?,?],f64>) -> ! 
// CHECK-SAME: !torch.vtensor<[?,?,?],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?],f64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[TODTYPE]] : !torch.vtensor<[?,?,?],f64> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @torch.valsem.aten.bernoulli.Tensor(%arg0: !torch.vtensor<[?,?,?],f64>, %arg1: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { +func.func @torch.valsem.aten.bernoulli.Tensor(%arg0: !torch.vtensor<[?,?,?],f64>, %arg1: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { %none = torch.constant.none %0 = torch.valsem.aten.bernoulli.Tensor %arg0, %arg1, %none : !torch.vtensor<[?,?,?],f64>, !torch.vtensor<[?,?,?],f64>, !torch.none -> !torch.vtensor<[?,?,?],f64> %1 = torch.tensor_static_info_cast %0 : !torch.vtensor<[?,?,?],f64> to !torch.vtensor @@ -510,7 +510,7 @@ func @torch.valsem.aten.bernoulli.Tensor(%arg0: !torch.vtensor<[?,?,?],f64>, %ar } // ----- -// CHECK-LABEL: func @torch.aten.rand_like( +// CHECK-LABEL: func.func @torch.aten.rand_like( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { // CHECK: %[[INT6:.*]] = torch.constant.int 6 // CHECK: %[[NONE_0:.*]] = torch.constant.none @@ -528,7 +528,7 @@ func @torch.valsem.aten.bernoulli.Tensor(%arg0: !torch.vtensor<[?,?,?],f64>, %ar // CHECK: %[[UNIFORM:.*]] = torch.valsem.aten.uniform %[[EMPTY]], %[[FLOAT0]], %[[FLOAT1]], %[[NONE_1]] : !torch.vtensor<[?,?,?],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[?,?,?],f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[UNIFORM]] : !torch.vtensor<[?,?,?],f32> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @torch.aten.rand_like(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { +func.func @torch.aten.rand_like(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor { %int6 = torch.constant.int 6 %none = torch.constant.none %0 = torch.aten.rand_like %arg0, %int6, %none, %none, %none, %none : !torch.vtensor<[?,?,?],f64>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?,?,?],f32> @@ -537,7 +537,7 @@ func @torch.aten.rand_like(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor } // ----- -// CHECK-LABEL: func @torch.aten.select.int( +// CHECK-LABEL: func.func @torch.aten.select.int( // CHECK-SAME: %[[T:.*]]: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[?],si64> { // CHECK: %[[CST0:.*]] = torch.constant.int 0 // CHECK: %[[CST1:.*]] = torch.constant.int 1 @@ -547,14 +547,14 @@ func @torch.aten.rand_like(%arg0: !torch.vtensor<[?,?,?],f64>) -> !torch.vtensor // CHECK: %[[SELECT:.*]] = torch.aten.squeeze.dim %[[SLICE]], %[[CST0]] : // CHECK-SAME: !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[?],si64> // CHECK: return %[[SELECT]] : !torch.vtensor<[?],si64> -func @torch.aten.select.int(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[?],si64> { +func.func @torch.aten.select.int(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[?],si64> { %int0 = torch.constant.int 0 %0 = torch.aten.select.int %arg0, %int0, %int0 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[?],si64> return %0 : !torch.vtensor<[?],si64> } // ----- -// CHECK-LABEL: func @torch.aten.hardsigmoid( +// CHECK-LABEL: func.func @torch.aten.hardsigmoid( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[CST1:.*]] = torch.constant.int 1 // CHECK: %[[CST2:.*]] = torch.constant.int 3 @@ -576,13 +576,13 @@ func @torch.aten.select.int(%arg0: 
!torch.vtensor<[?,?],si64>) -> !torch.vtensor // CHECK: %[[RET:.*]] = torch.aten.maximum %[[CST0_TENSOR]], %[[MIN]] : !torch.vtensor<[],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RET]] : !torch.vtensor<[?,?],f32> // CHECK: } -func @torch.aten.hardsigmoid(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.hardsigmoid(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.hardsigmoid %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.hardswish( +// CHECK-LABEL: func.func @torch.aten.hardswish( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[INT1:.*]] = torch.constant.int 1 // CHECK: %[[INT3:.*]] = torch.constant.int 3 @@ -599,13 +599,13 @@ func @torch.aten.hardsigmoid(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor // CHECK: %[[DIV:.*]] = torch.aten.div.Scalar %[[MIN]], %[[INT6]] : !torch.vtensor<[?,?],f32>, !torch.int -> !torch.vtensor<[?,?],f32> // CHECK: %[[MUL:.*]] = torch.aten.mul.Tensor %[[DIV]], %[[INP]] : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> // CHECK: return %[[MUL]] : !torch.vtensor<[?,?],f32> -func @torch.aten.hardswish(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.hardswish(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.aten.hardswish %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.hardtanh( +// CHECK-LABEL: func.func @torch.aten.hardtanh( // CHECK-SAME: %[[INPUT:.*]]: !torch.vtensor<[?],f32>, // CHECK-SAME: %[[MIN_VAL:.*]]: !torch.float, // CHECK-SAME: %[[MAX_VAL:.*]]: !torch.float) -> !torch.vtensor<[?],f32> { @@ -622,13 +622,13 @@ func @torch.aten.hardswish(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ // CHECK: %[[MAX_TENSOR:.*]] = torch.valsem.aten.fill.Scalar %[[VAL_10]], %[[MAX_VAL]] : !torch.vtensor<[],f32>, !torch.float -> !torch.vtensor<[],f32> // CHECK: %[[RET:.*]] = torch.aten.minimum %[[MAX_TENSOR]], %[[MIN]] : !torch.vtensor<[],f32>, !torch.vtensor<[?],f32> -> !torch.vtensor<[?],f32> // CHECK: return %[[RET]] : !torch.vtensor<[?],f32> -func @torch.aten.hardtanh(%arg0: !torch.vtensor<[?],f32>, %min: !torch.float, %max: !torch.float) -> !torch.vtensor<[?],f32> { +func.func @torch.aten.hardtanh(%arg0: !torch.vtensor<[?],f32>, %min: !torch.float, %max: !torch.float) -> !torch.vtensor<[?],f32> { %0 = torch.aten.hardtanh %arg0, %min, %max : !torch.vtensor<[?],f32>, !torch.float, !torch.float -> !torch.vtensor<[?],f32> return %0 : !torch.vtensor<[?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.new_zeros +// CHECK-LABEL: func.func @torch.aten.new_zeros // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[2,3],f32> { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[INT2:.*]] = torch.constant.int 2 @@ -638,7 +638,7 @@ func @torch.aten.hardtanh(%arg0: !torch.vtensor<[?],f32>, %min: !torch.float, %m // CHECK: %[[RES:.*]] = torch.aten.zeros %[[SIZE]], %[[INT6]], %[[NONE]], %[[NONE]], %[[NONE]] : !torch.list, !torch.int, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[2,3],f32> // CHECK: return %[[RES]] : !torch.vtensor<[2,3],f32> // CHECK: } -func @torch.aten.new_zeros(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[2,3],f32> { +func.func @torch.aten.new_zeros(%arg0: 
!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[2,3],f32> { %none = torch.constant.none %int2 = torch.constant.int 2 %int3 = torch.constant.int 3 @@ -648,7 +648,7 @@ func @torch.aten.new_zeros(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ } // ----- -// CHECK-LABEL: func @torch.aten.new_ones +// CHECK-LABEL: func.func @torch.aten.new_ones // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[3,4],si64> { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[INT3:.*]] = torch.constant.int 3 @@ -658,7 +658,7 @@ func @torch.aten.new_zeros(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ // CHECK: %[[RES:.*]] = torch.aten.ones %[[SIZE]], %[[INT4_0]], %[[NONE]], %[[NONE]], %[[NONE]] : !torch.list, !torch.int, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[3,4],si64> // CHECK: return %[[RES]] : !torch.vtensor<[3,4],si64> // CHECK: } -func @torch.aten.new_ones(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[3,4],si64> { +func.func @torch.aten.new_ones(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[3,4],si64> { %none = torch.constant.none %int3 = torch.constant.int 3 %int4 = torch.constant.int 4 @@ -668,18 +668,18 @@ func @torch.aten.new_ones(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[ } // ----- -// CHECK-LABEL: func @torch.aten.silu( +// CHECK-LABEL: func.func @torch.aten.silu( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor { // CHECK: %[[SIGMOID:.*]] = torch.aten.sigmoid %[[INP]] : !torch.vtensor<[?,?],f32> -> !torch.vtensor // CHECK: %[[MUL:.*]] = torch.aten.mul.Tensor %[[SIGMOID]], %[[INP]] : !torch.vtensor, !torch.vtensor<[?,?],f32> -> !torch.vtensor // CHECK: return %[[MUL]] : !torch.vtensor -func @torch.aten.silu(%arg0: !torch.vtensor<[?,?],f32> loc(unknown)) -> !torch.vtensor { +func.func @torch.aten.silu(%arg0: !torch.vtensor<[?,?],f32> loc(unknown)) -> !torch.vtensor { %0 = torch.aten.silu %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor return %0 : !torch.vtensor } // ----- -// CHECK-LABEL: func @torch.aten.full +// CHECK-LABEL: func.func @torch.aten.full // CHECK-SAME: () -> !torch.vtensor<[2,3],f32> { // CHECK: %[[FLOAT5:.*]] = torch.constant.float 5.000000e+00 // CHECK: %[[INT3:.*]] = torch.constant.int 3 @@ -690,7 +690,7 @@ func @torch.aten.silu(%arg0: !torch.vtensor<[?,?],f32> loc(unknown)) -> !torch.v // CHECK: %[[EMPTY:.*]] = torch.aten.empty.memory_format %[[SIZE]], %[[NONE]], %[[NONE]], %[[NONE]], %[[NONE]], %[[MEM_FORMAT]] : !torch.list, !torch.none, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[2,3],f32> // CHECK: %[[RES:.*]] = torch.valsem.aten.fill.Scalar %[[EMPTY]], %[[FLOAT5]] : !torch.vtensor<[2,3],f32>, !torch.float -> !torch.vtensor<[2,3],f32> // CHECK: return %[[RES]] : !torch.vtensor<[2,3],f32> -func @torch.aten.full() -> !torch.vtensor<[2,3],f32> { +func.func @torch.aten.full() -> !torch.vtensor<[2,3],f32> { %float5.000000e00 = torch.constant.float 5.000000e+00 %int3 = torch.constant.int 3 %int2 = torch.constant.int 2 @@ -701,7 +701,7 @@ func @torch.aten.full() -> !torch.vtensor<[2,3],f32> { } // ----- -// CHECK-LABEL: func @torch.aten.full_like( +// CHECK-LABEL: func.func @torch.aten.full_like( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[INT5:.*]] = torch.constant.int 5 // CHECK: %[[NONE:.*]] = torch.constant.none @@ -713,7 +713,7 @@ func @torch.aten.full() -> !torch.vtensor<[2,3],f32> { // CHECK: %[[EMPTY:.*]] = torch.aten.empty.memory_format %[[SIZE]], %[[NONE]], %[[NONE]], %[[NONE]], 
%[[NONE]], %[[NONE]] : !torch.list, !torch.none, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?,?],f32> // CHECK: %[[RES:.*]] = torch.valsem.aten.fill.Scalar %[[EMPTY]], %[[INT5]] : !torch.vtensor<[?,?],f32>, !torch.int -> !torch.vtensor<[?,?],f32> // CHECK: return %[[RES]] : !torch.vtensor<[?,?],f32> -func @torch.aten.full_like(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.full_like(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %int5 = torch.constant.int 5 %none = torch.constant.none %0 = torch.aten.full_like %arg0, %int5, %none, %none, %none, %none, %none : !torch.vtensor<[?,?],f32>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?,?],f32> @@ -721,7 +721,7 @@ func @torch.aten.full_like(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ } // ----- -// CHECK-LABEL: func @torch.aten.index_put( +// CHECK-LABEL: func.func @torch.aten.index_put( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?],f32>, %[[INDEX:.*]]: !torch.vtensor<[?],si64>, // CHECK-SAME: %[[VALUES:.*]]: !torch.vtensor<[?],f32>, // CHECK-SAME: %[[ACCUM:.*]]: !torch.bool) -> !torch.vtensor<[?],f32> { @@ -729,14 +729,14 @@ func @torch.aten.full_like(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[RES:.*]] = torch.valsem.aten.index_put_impl %[[INP]], %[[INDICES]], %[[VALUES]], %[[ACCUM]], %[[FALSE]] : !torch.vtensor<[?],f32>, !torch.list, !torch.vtensor<[?],f32>, !torch.bool, !torch.bool -> !torch.vtensor<[?],f32> // CHECK: return %[[RES]] : !torch.vtensor<[?],f32> -func @torch.aten.index_put(%input: !torch.vtensor<[?],f32>, %index: !torch.vtensor<[?],si64>, %values: !torch.vtensor<[?],f32>, %accumulate : !torch.bool) -> !torch.vtensor<[?],f32> { +func.func @torch.aten.index_put(%input: !torch.vtensor<[?],f32>, %index: !torch.vtensor<[?],si64>, %values: !torch.vtensor<[?],f32>, %accumulate : !torch.bool) -> !torch.vtensor<[?],f32> { %indices = torch.prim.ListConstruct %index : (!torch.vtensor<[?],si64>) -> !torch.list %0 = torch.aten.index_put %input, %indices, %values, %accumulate : !torch.vtensor<[?],f32>, !torch.list, !torch.vtensor<[?],f32>, !torch.bool -> !torch.vtensor<[?],f32> return %0 : !torch.vtensor<[?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.expand_as( +// CHECK-LABEL: func.func @torch.aten.expand_as( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,1,1],f32>, %[[OTHER:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[DIM0:.*]] = torch.aten.size.int %[[OTHER]], %[[INT0]] : !torch.vtensor<[?,?,?],f32>, !torch.int -> !torch.int @@ -747,13 +747,13 @@ func @torch.aten.index_put(%input: !torch.vtensor<[?],f32>, %index: !torch.vtens // CHECK: %[[SIZE:.*]] = torch.prim.ListConstruct %[[DIM0]], %[[DIM1]], %[[DIM2]] : (!torch.int, !torch.int, !torch.int) -> !torch.list // CHECK: %[[RES:.*]] = torch.aten.broadcast_to %[[INP]], %[[SIZE]] : !torch.vtensor<[?,1,1],f32>, !torch.list -> !torch.vtensor<[?,?,?],f32> // CHECK: return %[[RES]] : !torch.vtensor<[?,?,?],f32> -func @torch.aten.expand_as(%arg0: !torch.vtensor<[?,1,1],f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { +func.func @torch.aten.expand_as(%arg0: !torch.vtensor<[?,1,1],f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { %0 = torch.aten.expand_as %arg0, %arg1 : !torch.vtensor<[?,1,1],f32>, !torch.vtensor<[?,?,?],f32> -> 
!torch.vtensor<[?,?,?],f32> return %0 : !torch.vtensor<[?,?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten._to_copy( +// CHECK-LABEL: func.func @torch.aten._to_copy( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[NONE:.*]] = torch.constant.none @@ -767,7 +767,7 @@ func @torch.aten.expand_as(%arg0: !torch.vtensor<[?,1,1],f32>, %arg1: !torch.vte // CHECK: %[[EMPTY:.*]] = torch.aten.empty.memory_format %[[SIZE]], %[[NONE]], %[[NONE]], %[[NONE]], %[[NONE]], %[[NONE]] : !torch.list, !torch.none, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[?,?,?],f32> // CHECK: %[[RES:.*]] = torch.valsem.aten.copy %[[EMPTY]], %[[INP]], %[[FALSE]] : !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,?],f32>, !torch.bool -> !torch.vtensor<[?,?,?],f32> // CHECK: return %[[RES]] : !torch.vtensor<[?,?,?],f32> -func @torch.aten._to_copy(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { +func.func @torch.aten._to_copy(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[?,?,?],f32> { %false = torch.constant.bool false %none = torch.constant.none %0 = torch.aten._to_copy %arg0, %none, %none, %none, %none, %false, %none : !torch.vtensor<[?,?,?],f32>, !torch.none, !torch.none, !torch.none, !torch.none, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?],f32> @@ -775,12 +775,12 @@ func @torch.aten._to_copy(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor< } // ----- -// CHECK-LABEL: func @torch.aten.dropout$eval( +// CHECK-LABEL: func.func @torch.aten.dropout$eval( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[PROB:.*]] = torch.constant.float 1.000000e-01 // CHECK: %[[TRAIN:.*]] = torch.constant.bool false // CHECK: return %[[INP:.*]] : !torch.vtensor<[?,?],f32> -func @torch.aten.dropout$eval(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.dropout$eval(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %float1.000000e-01 = torch.constant.float 1.000000e-01 %false = torch.constant.bool false %0 = torch.aten.dropout %arg0, %float1.000000e-01, %false : !torch.vtensor<[?,?],f32>, !torch.float, !torch.bool -> !torch.vtensor<[?,?],f32> @@ -788,8 +788,8 @@ func @torch.aten.dropout$eval(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtenso } // ----- -// CHECK-LABEL: func @torch.aten.dropout$train( -// CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +// CHECK-LABEL: func.func @torch.aten.dropout$train( +// CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[PROB:.*]] = torch.constant.float 3.000000e-01 // CHECK: %[[TRAIN:.*]] = torch.constant.bool true // CHECK: %[[NONE:.*]] = torch.constant.none @@ -821,7 +821,7 @@ func @torch.aten.dropout$eval(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtenso // CHECK: %[[MASK_INP:.*]] = torch.aten.mul.Tensor %[[BOOL_MASK]], %[[INP]] : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> // CHECK: %[[OUT:.*]] = torch.aten.div.Scalar %[[MASK_INP]], %[[ONEMINUSP]] : !torch.vtensor<[?,?],f32>, !torch.float -> !torch.vtensor<[?,?],f32> // CHECK: return %[[OUT]] : !torch.vtensor<[?,?],f32> -func @torch.aten.dropout$train(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.aten.dropout$train(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %float3.000000e-01 = torch.constant.float 
3.000000e-01 %true = torch.constant.bool true %0 = torch.aten.dropout %arg0, %float3.000000e-01, %true : !torch.vtensor<[?,?],f32>, !torch.float, !torch.bool -> !torch.vtensor<[?,?],f32> @@ -829,18 +829,18 @@ func @torch.aten.dropout$train(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtens } // ----- -// CHECK-LABEL: func @torch.valsem.aten.zero( +// CHECK-LABEL: func.func @torch.valsem.aten.zero( // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { // CHECK: %[[ZERO:.*]] = torch.constant.int 0 // CHECK: %[[OUT:.*]] = torch.valsem.aten.fill.Scalar %[[INP]], %[[ZERO]] : !torch.vtensor<[?,?],f32>, !torch.int -> !torch.vtensor<[?,?],f32> // CHECK: return %[[OUT]] : !torch.vtensor<[?,?],f32> -func @torch.valsem.aten.zero(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { +func.func @torch.valsem.aten.zero(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> { %0 = torch.valsem.aten.zero %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32> return %0 : !torch.vtensor<[?,?],f32> } // ----- -// CHECK-LABEL: func @torch.aten.new_empty +// CHECK-LABEL: func.func @torch.aten.new_empty // CHECK-SAME: %[[INP:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[2,3],f32> { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[INT2:.*]] = torch.constant.int 2 @@ -850,7 +850,7 @@ func @torch.valsem.aten.zero(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor // CHECK: %[[INT6:.*]] = torch.constant.int 6 // CHECK: %[[RES:.*]] = torch.aten.empty.memory_format %[[SIZE]], %[[INT6]], %[[NONE]], %[[NONE]], %[[NONE]], %[[NONE_0]] : !torch.list, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[2,3],f32> // CHECK: return %[[RES]] : !torch.vtensor<[2,3],f32> -func @torch.aten.new_empty(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[2,3],f32> { +func.func @torch.aten.new_empty(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[2,3],f32> { %none = torch.constant.none %int2 = torch.constant.int 2 %int3 = torch.constant.int 3 @@ -860,7 +860,7 @@ func @torch.aten.new_empty(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ } // ----- -// CHECK-LABEL: func @torch.aten.where.Scalar( +// CHECK-LABEL: func.func @torch.aten.where.Scalar( // CHECK-SAME: %[[COND:.*]]: !torch.vtensor<[?,?,?],i1>) -> !torch.vtensor<[?,?,?],f32> { // CHECK: %[[CST8:.*]] = torch.constant.float 8.000000e+00 // CHECK: %[[CST4:.*]] = torch.constant.float 4.000000e+00 @@ -874,7 +874,7 @@ func @torch.aten.new_empty(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[ // CHECK: %[[FILL_OTHER:.*]] = torch.valsem.aten.fill.Scalar %[[ALLOC2]], %[[CST8]] : !torch.vtensor<[],f32>, !torch.float -> !torch.vtensor<[],f32> // CHECK: %[[OUT:.*]] = torch.aten.where.self %[[COND]], %[[FILL_SELF]], %[[FILL_OTHER]] : !torch.vtensor<[?,?,?],i1>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,?,?],f32> // CHECK: return %[[OUT]] : !torch.vtensor<[?,?,?],f32> -func @torch.aten.where.Scalar(%arg0: !torch.vtensor<[?,?,?],i1>) -> !torch.vtensor<[?,?,?],f32> { +func.func @torch.aten.where.Scalar(%arg0: !torch.vtensor<[?,?,?],i1>) -> !torch.vtensor<[?,?,?],f32> { %cst8 = torch.constant.float 8.000000e+00 %cst4 = torch.constant.float 4.000000e+00 %0 = torch.aten.where.Scalar %arg0, %cst4, %cst8 : !torch.vtensor<[?,?,?],i1>, !torch.float, !torch.float -> !torch.vtensor<[?,?,?],f32> @@ -882,7 +882,7 @@ func @torch.aten.where.Scalar(%arg0: !torch.vtensor<[?,?,?],i1>) -> !torch.vtens } // ----- -// CHECK-LABEL: func @torch.aten.where.ScalarSelf( 
+// CHECK-LABEL: func.func @torch.aten.where.ScalarSelf( // CHECK-SAME: %[[COND:.*]]: !torch.vtensor<[?,?,?],i1>, %[[OTHER:.*]]: !torch.vtensor<[?,?],f64>) -> !torch.vtensor<[?,?,?],f64> { // CHECK: %[[CST:.*]] = torch.constant.float 4.000000e+00 // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct : () -> !torch.list @@ -891,14 +891,14 @@ func @torch.aten.where.Scalar(%arg0: !torch.vtensor<[?,?,?],i1>) -> !torch.vtens // CHECK: %[[FILL:.*]] = torch.valsem.aten.fill.Scalar %[[ALLOC]], %[[CST]] : !torch.vtensor<[],f64>, !torch.float -> !torch.vtensor<[],f64> // CHECK: %[[OUT:.*]] = torch.aten.where.self %[[COND]], %[[FILL]], %[[OTHER]] : !torch.vtensor<[?,?,?],i1>, !torch.vtensor<[],f64>, !torch.vtensor<[?,?],f64> -> !torch.vtensor<[?,?,?],f64> // CHECK: return %[[OUT]] : !torch.vtensor<[?,?,?],f64> -func @torch.aten.where.ScalarSelf(%arg0: !torch.vtensor<[?,?,?],i1>, %arg1: !torch.vtensor<[?,?],f64>) -> !torch.vtensor<[?,?,?],f64> { +func.func @torch.aten.where.ScalarSelf(%arg0: !torch.vtensor<[?,?,?],i1>, %arg1: !torch.vtensor<[?,?],f64>) -> !torch.vtensor<[?,?,?],f64> { %cst = torch.constant.float 4.000000e+00 %0 = torch.aten.where.ScalarSelf %arg0, %cst, %arg1 : !torch.vtensor<[?,?,?],i1>, !torch.float, !torch.vtensor<[?,?],f64> -> !torch.vtensor<[?,?,?],f64> return %0 : !torch.vtensor<[?,?,?],f64> } // ----- -// CHECK-LABEL: func @torch.aten.where.ScalarOther( +// CHECK-LABEL: func.func @torch.aten.where.ScalarOther( // CHECK-SAME: %[[COND:.*]]: !torch.vtensor<[?,?,?],i1>, %[[SELF:.*]]: !torch.vtensor<[?,?],f64>) -> !torch.vtensor<[?,?,?],f64> { // CHECK: %[[CST:.*]] = torch.constant.float 4.000000e+00 // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct : () -> !torch.list @@ -907,21 +907,21 @@ func @torch.aten.where.ScalarSelf(%arg0: !torch.vtensor<[?,?,?],i1>, %arg1: !tor // CHECK: %[[FILL:.*]] = torch.valsem.aten.fill.Scalar %[[ALLOC]], %[[CST]] : !torch.vtensor<[],f64>, !torch.float -> !torch.vtensor<[],f64> // CHECK: %[[OUT:.*]] = torch.aten.where.self %[[COND]], %[[SELF]], %[[FILL]] : !torch.vtensor<[?,?,?],i1>, !torch.vtensor<[?,?],f64>, !torch.vtensor<[],f64> -> !torch.vtensor<[?,?,?],f64> // CHECK: return %[[OUT]] : !torch.vtensor<[?,?,?],f64> -func @torch.aten.where.ScalarOther(%arg0: !torch.vtensor<[?,?,?],i1>, %arg1: !torch.vtensor<[?,?],f64>) -> !torch.vtensor<[?,?,?],f64> { +func.func @torch.aten.where.ScalarOther(%arg0: !torch.vtensor<[?,?,?],i1>, %arg1: !torch.vtensor<[?,?],f64>) -> !torch.vtensor<[?,?,?],f64> { %cst = torch.constant.float 4.000000e+00 %0 = torch.aten.where.ScalarOther %arg0, %arg1, %cst : !torch.vtensor<[?,?,?],i1>, !torch.vtensor<[?,?],f64>, !torch.float -> !torch.vtensor<[?,?,?],f64> return %0 : !torch.vtensor<[?,?,?],f64> } // ----- -// CHECK-LABEL: func @torch.aten.pad +// CHECK-LABEL: func.func @torch.aten.pad // CHECK-SAME: (%[[SELF:.*]]: !torch.vtensor<[?,?,?],f64>, %[[VALUE:.*]]: !torch.float) -> !torch.vtensor<[?,?,?],f64> { // CHECK-NOT: torch.aten.pad // CHECK: %[[STRING:.*]] = torch.constant.str "constant" // CHECK-NEXT: %[[LIST:.*]] = torch.prim.ListConstruct // CHECK-NEXT: %[[PAD_ND:.*]] = torch.aten.constant_pad_nd %[[SELF]], %[[LIST]], %[[VALUE]] // CHECK-NEXT: return %[[PAD_ND]] -func @torch.aten.pad(%arg0: !torch.vtensor<[?,?,?],f64>, %arg1: !torch.float) -> !torch.vtensor<[?,?,?],f64> { +func.func @torch.aten.pad(%arg0: !torch.vtensor<[?,?,?],f64>, %arg1: !torch.float) -> !torch.vtensor<[?,?,?],f64> { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 @@ -933,7 +933,7 @@ func @torch.aten.pad(%arg0: 
!torch.vtensor<[?,?,?],f64>, %arg1: !torch.float) -> } // ----- -// CHECK-LABEL: func @torch.aten.to.dtype_layout( +// CHECK-LABEL: func.func @torch.aten.to.dtype_layout( // CHECK-SAME: %[[SELF:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f64> { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -941,7 +941,7 @@ func @torch.aten.pad(%arg0: !torch.vtensor<[?,?,?],f64>, %arg1: !torch.float) -> // CHECK: %[[CST7:.*]] = torch.constant.int 7 // CHECK: %[[OUT:.*]] = torch.aten.to.dtype %[[SELF]], %[[CST7]], %[[FALSE]], %[[FALSE]], %[[NONE]] : !torch.vtensor<[?,?],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?],f64> // CHECK: return %[[OUT]] : !torch.vtensor<[?,?],f64> -func @torch.aten.to.dtype_layout(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f64> { +func.func @torch.aten.to.dtype_layout(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f64> { %none = torch.constant.none %false = torch.constant.bool false %int0 = torch.constant.int 0 diff --git a/test/Dialect/Torch/drop-shape-calculations.mlir b/test/Dialect/Torch/drop-shape-calculations.mlir index d7d221e65..9aae69aea 100644 --- a/test/Dialect/Torch/drop-shape-calculations.mlir +++ b/test/Dialect/Torch/drop-shape-calculations.mlir @@ -1,11 +1,11 @@ // RUN: torch-mlir-opt -torch-drop-shape-calculations -split-input-file %s | FileCheck %s -// CHECK-LABEL: func @basic( +// CHECK-LABEL: func.func @basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,?],unk>) -> !torch.vtensor { // CHECK: %[[TANH:.*]] = torch.aten.tanh %[[ARG]] : !torch.vtensor<[2,?],unk> -> !torch.vtensor<[2,?],unk> // CHECK: %[[ERASED:.*]] = torch.tensor_static_info_cast %[[TANH]] : !torch.vtensor<[2,?],unk> to !torch.vtensor // CHECK: return %[[ERASED]] : !torch.vtensor -func @basic(%arg0: !torch.vtensor<[2,?],unk>) -> !torch.vtensor { +func.func @basic(%arg0: !torch.vtensor<[2,?],unk>) -> !torch.vtensor { %int2 = torch.constant.int 2 %int1 = torch.constant.int 1 %0 = torch.shape.calculate { diff --git a/test/Dialect/Torch/inline-global-slots.mlir b/test/Dialect/Torch/inline-global-slots.mlir index a7be0d0c5..0d86a814c 100644 --- a/test/Dialect/Torch/inline-global-slots.mlir +++ b/test/Dialect/Torch/inline-global-slots.mlir @@ -16,8 +16,8 @@ torch.global_slot "private" @mutated : !torch.tensor { torch.global_slot.init %0 : !torch.tensor } -// CHECK-LABEL: func @forward() -> (!torch.tensor, !torch.tensor, !torch.tensor) { -func @forward() -> (!torch.tensor, !torch.tensor, !torch.tensor) { +// CHECK-LABEL: func.func @forward() -> (!torch.tensor, !torch.tensor, !torch.tensor) { +func.func @forward() -> (!torch.tensor, !torch.tensor, !torch.tensor) { // Inlined. 
// CHECK: %[[READONLY:.*]] = torch.tensor.literal(dense<0.000000e+00> : tensor<1xf32>) : !torch.tensor %0 = torch.global_slot.get @readonly : !torch.tensor diff --git a/test/Dialect/Torch/maximize-value-semantics.mlir b/test/Dialect/Torch/maximize-value-semantics.mlir index c7366e8ce..33e6a5285 100644 --- a/test/Dialect/Torch/maximize-value-semantics.mlir +++ b/test/Dialect/Torch/maximize-value-semantics.mlir @@ -1,20 +1,20 @@ // RUN: torch-mlir-opt -split-input-file -allow-unregistered-dialect %s -torch-maximize-value-semantics | FileCheck %s -// CHECK-LABEL: func @torch.copy.tensor$basic( +// CHECK-LABEL: func.func @torch.copy.tensor$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { // CHECK: return %[[ARG0]], %[[ARG0]] : !torch.vtensor, !torch.vtensor -func @torch.copy.tensor$basic(%arg0: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { +func.func @torch.copy.tensor$basic(%arg0: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { %0 = torch.copy.to_tensor %arg0 : !torch.tensor %1 = torch.copy.to_vtensor %0 : !torch.vtensor %2 = torch.copy.to_vtensor %0 : !torch.vtensor return %1, %2 : !torch.vtensor, !torch.vtensor } -// CHECK-LABEL: func @one_mutation_in_a_block( +// CHECK-LABEL: func.func @one_mutation_in_a_block( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { // CHECK: return %[[ARG0]], %[[ARG1]] : !torch.vtensor, !torch.vtensor -func @one_mutation_in_a_block(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { +func.func @one_mutation_in_a_block(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { %0 = torch.copy.to_tensor %arg0 : !torch.tensor %equal_to_arg0 = torch.copy.to_vtensor %0 : !torch.vtensor torch.overwrite.tensor.contents %arg1 overwrites %0 : !torch.vtensor, !torch.tensor @@ -22,11 +22,11 @@ func @one_mutation_in_a_block(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> ( return %equal_to_arg0, %equal_to_arg1 : !torch.vtensor, !torch.vtensor } -// CHECK-LABEL: func @multiple_mutations_in_a_block( +// CHECK-LABEL: func.func @multiple_mutations_in_a_block( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, %[[ARG1:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG2:.*]]: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor, !torch.vtensor, !torch.vtensor) { // CHECK: return %[[ARG0]], %[[ARG1]], %[[ARG1]], %[[ARG2]] : !torch.vtensor, !torch.vtensor, !torch.vtensor, !torch.vtensor -func @multiple_mutations_in_a_block(%arg0: !torch.vtensor, %arg1: !torch.vtensor, %arg2: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor, !torch.vtensor, !torch.vtensor) { +func.func @multiple_mutations_in_a_block(%arg0: !torch.vtensor, %arg1: !torch.vtensor, %arg2: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor, !torch.vtensor, !torch.vtensor) { // The mutable tensor we are overwriting. 
%tensor = torch.copy.to_tensor %arg0 : !torch.tensor @@ -45,12 +45,12 @@ func @multiple_mutations_in_a_block(%arg0: !torch.vtensor, %arg1: !torch.vtensor return %equal_to_arg0, %equal_to_arg1, %equal_to_arg1_again, %equal_to_arg2 : !torch.vtensor, !torch.vtensor, !torch.vtensor, !torch.vtensor } -// CHECK-LABEL: func @mutation_followed_by_view_like_ops( +// CHECK-LABEL: func.func @mutation_followed_by_view_like_ops( // CHECK-SAME: %[[VALUE_T:.*]]: !torch.vtensor, %[[OVERWRITER:.*]]: !torch.vtensor, %[[INT_LIST:.*]]: !torch.list) -> !torch.vtensor { // CHECK: %[[VIEW:.*]] = torch.aten.view %[[OVERWRITER]], %[[INT_LIST]] : !torch.vtensor, !torch.list -> !torch.vtensor // CHECK: %[[RESULT:.*]] = torch.aten.permute %[[VIEW]], %[[INT_LIST]] : !torch.vtensor, !torch.list -> !torch.vtensor // CHECK: return %[[RESULT]] : !torch.vtensor -func @mutation_followed_by_view_like_ops(%value_t: !torch.vtensor, %overwriter: !torch.vtensor, %int_list: !torch.list) -> !torch.vtensor { +func.func @mutation_followed_by_view_like_ops(%value_t: !torch.vtensor, %overwriter: !torch.vtensor, %int_list: !torch.list) -> !torch.vtensor { %t = torch.copy.to_tensor %value_t : !torch.tensor torch.overwrite.tensor.contents %overwriter overwrites %t : !torch.vtensor, !torch.tensor %view = torch.aten.view %t, %int_list : !torch.tensor, !torch.list -> !torch.tensor @@ -59,10 +59,10 @@ func @mutation_followed_by_view_like_ops(%value_t: !torch.vtensor, %overwriter: return %value_result : !torch.vtensor } -// CHECK-LABEL: func @mutation_of_view_like_op_result( +// CHECK-LABEL: func.func @mutation_of_view_like_op_result( // CHECK-SAME: %[[VALUE_T:.*]]: !torch.vtensor, %[[OVERWRITER:.*]]: !torch.vtensor, %[[INT_LIST:.*]]: !torch.list) -> !torch.vtensor { // CHECK: return %[[OVERWRITER]] : !torch.vtensor -func @mutation_of_view_like_op_result(%value_t: !torch.vtensor, %overwriter: !torch.vtensor, %int_list: !torch.list) -> !torch.vtensor { +func.func @mutation_of_view_like_op_result(%value_t: !torch.vtensor, %overwriter: !torch.vtensor, %int_list: !torch.list) -> !torch.vtensor { %t = torch.copy.to_tensor %value_t : !torch.tensor %view = torch.aten.view %t, %int_list : !torch.tensor, !torch.list -> !torch.tensor torch.overwrite.tensor.contents %overwriter overwrites %view : !torch.vtensor, !torch.tensor @@ -70,20 +70,20 @@ func @mutation_of_view_like_op_result(%value_t: !torch.vtensor, %overwriter: !to return %result : !torch.vtensor } -// CHECK-LABEL: func @value_tensor_used_after_copy_was_mutated( +// CHECK-LABEL: func.func @value_tensor_used_after_copy_was_mutated( // CHECK-SAME: %[[VALUE_T:.*]]: !torch.vtensor, // CHECK-SAME: %[[OVERWRITER:.*]]: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { // CHECK: return %[[VALUE_T]], %[[OVERWRITER]] : !torch.vtensor, !torch.vtensor -func @value_tensor_used_after_copy_was_mutated(%value_t: !torch.vtensor, %overwriter: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { +func.func @value_tensor_used_after_copy_was_mutated(%value_t: !torch.vtensor, %overwriter: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { %t = torch.copy.to_tensor %value_t : !torch.tensor torch.overwrite.tensor.contents %overwriter overwrites %t : !torch.vtensor, !torch.tensor %value_mutated_t = torch.copy.to_vtensor %t : !torch.vtensor return %value_t, %value_mutated_t : !torch.vtensor, !torch.vtensor } -// CHECK-LABEL: func @unmodeled_mutation( +// CHECK-LABEL: func.func @unmodeled_mutation( // CHECK: torch.overwrite.tensor.contents -func @unmodeled_mutation(%arg0: !torch.vtensor, %arg1: !torch.vtensor) 
-> !torch.vtensor { +func.func @unmodeled_mutation(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { %0 = torch.copy.to_tensor %arg0 : !torch.tensor torch.overwrite.tensor.contents %arg1 overwrites %0 : !torch.vtensor, !torch.tensor "some.op"(%0) : (!torch.tensor) -> () @@ -92,9 +92,9 @@ func @unmodeled_mutation(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch } // We don't yet handle nontrivial cases involving control flow. -// CHECK-LABEL: func @unimplemented_control_flow( +// CHECK-LABEL: func.func @unimplemented_control_flow( // CHECK: torch.copy.to_vtensor -func @unimplemented_control_flow(%arg0: !torch.vtensor, %arg1: !torch.vtensor, %cond: !torch.bool) -> (!torch.vtensor, !torch.vtensor) { +func.func @unimplemented_control_flow(%arg0: !torch.vtensor, %arg1: !torch.vtensor, %cond: !torch.bool) -> (!torch.vtensor, !torch.vtensor) { %tensor = torch.copy.to_tensor %arg0 : !torch.tensor %equal_to_arg0 = torch.copy.to_vtensor %tensor : !torch.vtensor torch.prim.If %cond -> () { @@ -107,33 +107,33 @@ func @unimplemented_control_flow(%arg0: !torch.vtensor, %arg1: !torch.vtensor, % return %equal_to_arg0, %equal_to_arg1 : !torch.vtensor, !torch.vtensor } -// CHECK-LABEL: func @non_value_tensor_returned( +// CHECK-LABEL: func.func @non_value_tensor_returned( // CHECK-SAME: %[[VALUE_T:.*]]: !torch.vtensor) -> !torch.tensor { // CHECK: %[[T:.*]] = torch.copy.to_tensor %[[VALUE_T]] : !torch.tensor // CHECK: return %[[T]] : !torch.tensor -func @non_value_tensor_returned(%value_t: !torch.vtensor) -> !torch.tensor { +func.func @non_value_tensor_returned(%value_t: !torch.vtensor) -> !torch.tensor { %t = torch.copy.to_tensor %value_t : !torch.tensor return %t : !torch.tensor } -// CHECK-LABEL: func @non_value_tensor_returned$with_overwrite( +// CHECK-LABEL: func.func @non_value_tensor_returned$with_overwrite( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %{{.*}}: !torch.vtensor) -> !torch.tensor { // CHECK: %[[RESULT:.*]] = torch.copy.to_tensor %[[ARG0]] : !torch.tensor // CHECK: return %[[RESULT]] : !torch.tensor -func @non_value_tensor_returned$with_overwrite(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.tensor { +func.func @non_value_tensor_returned$with_overwrite(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.tensor { %2 = torch.copy.to_tensor %arg1 : !torch.tensor torch.overwrite.tensor.contents %arg0 overwrites %2 : !torch.vtensor, !torch.tensor return %2 : !torch.tensor } -// CHECK-LABEL: func @non_value_tensor_returned$return_from_multiple_slices( +// CHECK-LABEL: func.func @non_value_tensor_returned$return_from_multiple_slices( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> (!torch.tensor, !torch.vtensor, !torch.tensor) { // CHECK: %[[NON_VALUE_TENSOR0:.*]] = torch.copy.to_tensor %[[ARG0]] : !torch.tensor // CHECK: %[[NON_VALUE_TENSOR1:.*]] = torch.copy.to_tensor %[[ARG1]] : !torch.tensor // CHECK: return %[[NON_VALUE_TENSOR0]], %[[ARG0]], %[[NON_VALUE_TENSOR1]] : !torch.tensor, !torch.vtensor, !torch.tensor -func @non_value_tensor_returned$return_from_multiple_slices(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> (!torch.tensor, !torch.vtensor, !torch.tensor) { +func.func @non_value_tensor_returned$return_from_multiple_slices(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> (!torch.tensor, !torch.vtensor, !torch.tensor) { %0 = torch.copy.to_tensor %arg0 : !torch.tensor // Make a vtensor copy and return that, just to have a load-bearing use. 
// This test mainly checks the rewriting of the non-value tensor returns @@ -143,12 +143,12 @@ func @non_value_tensor_returned$return_from_multiple_slices(%arg0: !torch.vtenso return %0, %1, %2 : !torch.tensor, !torch.vtensor, !torch.tensor } -// CHECK-LABEL: func @viewlike$basic_unsqueeze( +// CHECK-LABEL: func.func @viewlike$basic_unsqueeze( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[UNSQUEEZE:.*]] = torch.aten.unsqueeze %[[ARG]], %[[INT0]] : !torch.vtensor, !torch.int -> !torch.vtensor // CHECK: return %[[UNSQUEEZE]] : !torch.vtensor -func @viewlike$basic_unsqueeze(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @viewlike$basic_unsqueeze(%arg0: !torch.vtensor) -> !torch.vtensor { %int0 = torch.constant.int 0 %0 = torch.copy.to_tensor %arg0 : !torch.tensor %1 = torch.aten.unsqueeze %0, %int0 : !torch.tensor, !torch.int -> !torch.tensor @@ -156,13 +156,13 @@ func @viewlike$basic_unsqueeze(%arg0: !torch.vtensor) -> !torch.vtensor { return %2 : !torch.vtensor } -// CHECK-LABEL: func @viewlike$basic_flatten( +// CHECK-LABEL: func.func @viewlike$basic_flatten( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[INTM1:.*]] = torch.constant.int -1 // CHECK: %[[FLATTEN:.*]] = torch.aten.flatten.using_ints %[[ARG]], %[[INT0]], %[[INTM1]] : !torch.vtensor, !torch.int, !torch.int -> !torch.vtensor // CHECK: return %[[FLATTEN]] : !torch.vtensor -func @viewlike$basic_flatten(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @viewlike$basic_flatten(%arg0: !torch.vtensor) -> !torch.vtensor { %start = torch.constant.int 0 %end = torch.constant.int -1 %0 = torch.copy.to_tensor %arg0 : !torch.tensor @@ -171,13 +171,13 @@ func @viewlike$basic_flatten(%arg0: !torch.vtensor) -> !torch.vtensor { return %2 : !torch.vtensor } -// CHECK-LABEL: func @viewlike$transitive( +// CHECK-LABEL: func.func @viewlike$transitive( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[UNSQUEEZE0:.*]] = torch.aten.unsqueeze %[[ARG]], %[[INT0]] : !torch.vtensor, !torch.int -> !torch.vtensor // CHECK: %[[UNSQUEEZE1:.*]] = torch.aten.unsqueeze %[[UNSQUEEZE0]], %[[INT0]] : !torch.vtensor, !torch.int -> !torch.vtensor // CHECK: return %[[UNSQUEEZE1]] : !torch.vtensor -func @viewlike$transitive(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @viewlike$transitive(%arg0: !torch.vtensor) -> !torch.vtensor { %int0 = torch.constant.int 0 %0 = torch.copy.to_tensor %arg0 : !torch.tensor %1 = torch.aten.unsqueeze %0, %int0 : !torch.tensor, !torch.int -> !torch.tensor @@ -186,14 +186,14 @@ func @viewlike$transitive(%arg0: !torch.vtensor) -> !torch.vtensor { return %3 : !torch.vtensor } -// CHECK-LABEL: func @viewlike$transitive_tree( +// CHECK-LABEL: func.func @viewlike$transitive_tree( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[UNSQUEEZE0:.*]] = torch.aten.unsqueeze %[[ARG]], %[[INT0]] : !torch.vtensor, !torch.int -> !torch.vtensor // CHECK: %[[RET0:.*]] = torch.aten.unsqueeze %[[UNSQUEEZE0]], %[[INT0]] : !torch.vtensor, !torch.int -> !torch.vtensor // CHECK: %[[RET1:.*]] = torch.aten.unsqueeze %[[UNSQUEEZE0]], %[[INT0]] : !torch.vtensor, !torch.int -> !torch.vtensor // CHECK: return %[[RET0]], %[[RET1]] : !torch.vtensor, !torch.vtensor -func @viewlike$transitive_tree(%arg0: !torch.vtensor) -> (!torch.vtensor, 
!torch.vtensor) { +func.func @viewlike$transitive_tree(%arg0: !torch.vtensor) -> (!torch.vtensor, !torch.vtensor) { %int0 = torch.constant.int 0 %0 = torch.copy.to_tensor %arg0 : !torch.tensor // %1 has two users. @@ -208,11 +208,11 @@ func @viewlike$transitive_tree(%arg0: !torch.vtensor) -> (!torch.vtensor, !torch return %3, %5 : !torch.vtensor, !torch.vtensor } -// CHECK-LABEL: func @viewlike$unmodeled_op( +// CHECK-LABEL: func.func @viewlike$unmodeled_op( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[UNSQUEEZE:.*]] = torch.aten.unsqueeze {{.*}} : !torch.tensor, !torch.int -> !torch.tensor // CHECK: "some.op"(%[[UNSQUEEZE]]) : (!torch.tensor) -> () -func @viewlike$unmodeled_op(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @viewlike$unmodeled_op(%arg0: !torch.vtensor) -> !torch.vtensor { %int0 = torch.constant.int 0 %0 = torch.copy.to_tensor %arg0 : !torch.tensor %1 = torch.aten.unsqueeze %0, %int0 : !torch.tensor, !torch.int -> !torch.tensor @@ -221,23 +221,23 @@ func @viewlike$unmodeled_op(%arg0: !torch.vtensor) -> !torch.vtensor { return %2 : !torch.vtensor } -// CHECK-LABEL: func @viewlike$two_inputs_one_copy( +// CHECK-LABEL: func.func @viewlike$two_inputs_one_copy( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[EXPAND_AS:.*]] = torch.aten.expand_as %[[ARG]], %[[ARG]] : !torch.vtensor, !torch.vtensor -> !torch.vtensor // CHECK: return %[[EXPAND_AS]] : !torch.vtensor -func @viewlike$two_inputs_one_copy(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @viewlike$two_inputs_one_copy(%arg0: !torch.vtensor) -> !torch.vtensor { %0 = torch.copy.to_tensor %arg0 : !torch.tensor %1 = torch.aten.expand_as %0, %0 : !torch.tensor, !torch.tensor -> !torch.tensor %2 = torch.copy.to_vtensor %1 : !torch.vtensor return %2 : !torch.vtensor } -// CHECK-LABEL: func @viewlike$two_inputs_two_copies( +// CHECK-LABEL: func.func @viewlike$two_inputs_two_copies( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[EXPAND_AS:.*]] = torch.aten.expand_as %[[ARG0]], %[[ARG1]] : !torch.vtensor, !torch.vtensor -> !torch.vtensor // CHECK: return %[[EXPAND_AS]] : !torch.vtensor -func @viewlike$two_inputs_two_copies(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { +func.func @viewlike$two_inputs_two_copies(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { %0 = torch.copy.to_tensor %arg0 : !torch.tensor %1 = torch.copy.to_tensor %arg1 : !torch.tensor %2 = torch.aten.expand_as %0, %1 : !torch.tensor, !torch.tensor -> !torch.tensor diff --git a/test/Dialect/Torch/ops.mlir b/test/Dialect/Torch/ops.mlir index 83ec03c75..bf66318ea 100644 --- a/test/Dialect/Torch/ops.mlir +++ b/test/Dialect/Torch/ops.mlir @@ -1,52 +1,52 @@ // RUN: torch-mlir-opt %s | torch-mlir-opt | FileCheck %s -// CHECK-LABEL: func @torch.operator( -func @torch.operator(%arg0: !torch.tensor, %arg1: !torch.tensor) -> !torch.tensor { +// CHECK-LABEL: func.func @torch.operator( +func.func @torch.operator(%arg0: !torch.tensor, %arg1: !torch.tensor) -> !torch.tensor { // CHECK: torch.operator "ns.unqual.overload"(%arg0, %arg1) : (!torch.tensor, !torch.tensor) -> !torch.tensor %0 = torch.operator "ns.unqual.overload"(%arg0, %arg1) : (!torch.tensor, !torch.tensor) -> !torch.tensor return %0 : !torch.tensor } -func @torch.linear_params.create(%arg0: !torch.tensor, %arg1: !torch.tensor) -> (!torch.LinearParams, !torch.LinearParams) { +func.func @torch.linear_params.create(%arg0: 
!torch.tensor, %arg1: !torch.tensor) -> (!torch.LinearParams, !torch.LinearParams) { %with_bias = torch.linear_params.create %arg0, %arg1 : !torch.tensor, !torch.tensor %without_bias = torch.linear_params.create %arg0 : !torch.tensor return %with_bias, %without_bias : !torch.LinearParams, !torch.LinearParams } // CHECK: @tensor.default() -> !torch.tensor -func private @tensor.default() -> !torch.tensor +func.func private @tensor.default() -> !torch.tensor // CHECK: @tensor.default_explicit() -> !torch.tensor{{$}} -func private @tensor.default_explicit() -> !torch.tensor<*,unk> +func.func private @tensor.default_explicit() -> !torch.tensor<*,unk> // CHECK: @tensor.value_semantic() -> !torch.vtensor{{$}} -func private @tensor.value_semantic() -> !torch.vtensor<*,unk> +func.func private @tensor.value_semantic() -> !torch.vtensor<*,unk> // CHECK: @tensor.dtype() -> !torch.tensor<*,si32> -func private @tensor.dtype() -> !torch.tensor<*,si32> +func.func private @tensor.dtype() -> !torch.tensor<*,si32> // CHECK: @tensor.ranked() -> !torch.tensor<[?,?,?],unk> -func private @tensor.ranked() -> !torch.tensor<[?,?,?],unk> +func.func private @tensor.ranked() -> !torch.tensor<[?,?,?],unk> // CHECK: @tensor.some_sizes_known() -> !torch.tensor<[?,2,?,4],unk> -func private @tensor.some_sizes_known() -> !torch.tensor<[?,2,?,4],unk> +func.func private @tensor.some_sizes_known() -> !torch.tensor<[?,2,?,4],unk> // CHECK: @tensor.fully_determined() -> !torch.vtensor<[1,2,3,4],f32> -func private @tensor.fully_determined() -> !torch.vtensor<[1,2,3,4],f32> +func.func private @tensor.fully_determined() -> !torch.vtensor<[1,2,3,4],f32> // CHECK: @tuple.empty() -> !torch.tuple<> -func private @tuple.empty() -> !torch.tuple<> +func.func private @tuple.empty() -> !torch.tuple<> // CHECK: @tuple.one_element() -> !torch.tuple -func private @tuple.one_element() -> !torch.tuple +func.func private @tuple.one_element() -> !torch.tuple // CHECK: @tuple.two_elements() -> !torch.tuple -func private @tuple.two_elements() -> !torch.tuple +func.func private @tuple.two_elements() -> !torch.tuple // CHECK: @union.empty() -> !torch.union<> -func private @union.empty() -> !torch.union<> +func.func private @union.empty() -> !torch.union<> // CHECK: @union.one_element() -> !torch.union -func private @union.one_element() -> !torch.union +func.func private @union.one_element() -> !torch.union // CHECK: @union.two_elements() -> !torch.union -func private @union.two_elements() -> !torch.union +func.func private @union.two_elements() -> !torch.union // CHECK: @dict() -> !torch.dict -func private @dict() -> !torch.dict +func.func private @dict() -> !torch.dict -// CHECK-LABEL: func @torch.tensor.literal() { -func @torch.tensor.literal() { +// CHECK-LABEL: func.func @torch.tensor.literal() { +func.func @torch.tensor.literal() { // CHECK: torch.tensor.literal(dense<4.200000e+01> : tensor<3x2xf32>) : !torch.tensor %0 = torch.tensor.literal(dense<42.0> : tensor<3x2xf32>) : !torch.tensor // CHECK: torch.tensor.literal(dense<4.200000e+01> : tensor<3x2xf32>) : !torch.tensor<[3,2],f32> @@ -54,19 +54,19 @@ func @torch.tensor.literal() { return } -// CHECK-LABEL: func @torch.vtensor.literal() { -func @torch.vtensor.literal() { +// CHECK-LABEL: func.func @torch.vtensor.literal() { +func.func @torch.vtensor.literal() { // CHECK: torch.vtensor.literal(dense<4.200000e+01> : tensor<3x2xf32>) : !torch.vtensor<[3,2],f32> %0 = torch.vtensor.literal(dense<42.0> : tensor<3x2xf32>) : !torch.vtensor<[3,2],f32> return } -func @derefine(%arg0: !torch.tensor) -> 
!torch.optional { +func.func @derefine(%arg0: !torch.tensor) -> !torch.optional { %0 = torch.derefine %arg0 : !torch.tensor to !torch.optional return %0 : !torch.optional } -func @torch.prim.If(%arg0: !torch.bool, %arg1: !torch.int) -> !torch.int { +func.func @torch.prim.If(%arg0: !torch.bool, %arg1: !torch.int) -> !torch.int { %0 = torch.prim.If %arg0 -> (!torch.int) { %1 = torch.aten.add.int %arg1, %arg1 : !torch.int, !torch.int -> !torch.int torch.prim.If.yield %1 : !torch.int @@ -103,7 +103,7 @@ func @torch.prim.If(%arg0: !torch.bool, %arg1: !torch.int) -> !torch.int { %none = torch.constant.none // CHECK: %str = torch.constant.str "some str" %str = torch.constant.str "some str" -func private @f(%arg0: !torch.nn.Module<"test">) { +func.func private @f(%arg0: !torch.nn.Module<"test">) { return } @@ -131,7 +131,7 @@ torch.nn_module { } : !torch.nn.Module<"test"> -func @shape_calculations(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @shape_calculations(%arg0: !torch.vtensor) -> !torch.vtensor { %0 = torch.shape.calculate { %0 = torch.aten.tanh %arg0 : !torch.vtensor -> !torch.vtensor torch.shape.calculate.yield %0 : !torch.vtensor @@ -142,7 +142,7 @@ func @shape_calculations(%arg0: !torch.vtensor) -> !torch.vtensor { return %0 : !torch.vtensor } -func @number_type_subtypes(%arg0: !torch.tensor, %arg1: !torch.list, %arg2: !torch.union) { +func.func @number_type_subtypes(%arg0: !torch.tensor, %arg1: !torch.list, %arg2: !torch.union) { %0 = torch.aten.constant_pad_nd %arg0, %arg1, %arg2 : !torch.tensor, !torch.list, !torch.union -> !torch.tensor return } diff --git a/test/Dialect/Torch/prepare-for-globalize-object-graph.mlir b/test/Dialect/Torch/prepare-for-globalize-object-graph.mlir index 3e69c25ac..bbc8e9889 100644 --- a/test/Dialect/Torch/prepare-for-globalize-object-graph.mlir +++ b/test/Dialect/Torch/prepare-for-globalize-object-graph.mlir @@ -6,23 +6,23 @@ torch.class_type @c { } -// CHECK-LABEL: func private @test_call_method( +// CHECK-LABEL: func.func private @test_call_method( // CHECK-SAME: %[[RECEIVER:.*]]: !torch.nn.Module<"c">, // CHECK-SAME: %[[F:.*]]: !torch.float) -> !torch.float { // CHECK: %[[RET:.*]] = call @test_call_method(%[[RECEIVER]], %[[F]]) : (!torch.nn.Module<"c">, !torch.float) -> !torch.float // CHECK: return %[[RET]] : !torch.float -func private @test_call_method(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) -> !torch.float { +func.func private @test_call_method(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) -> !torch.float { %0 = torch.prim.CallMethod %arg0["test_call_method"] (%arg1) : !torch.nn.Module<"c">, (!torch.float) -> !torch.float return %0 : !torch.float } -// CHECK-LABEL: func private @test_call_indirect( +// CHECK-LABEL: func.func private @test_call_indirect( // CHECK-SAME: %[[RECEIVER:.*]]: !torch.nn.Module<"c">, // CHECK-SAME: %[[F:.*]]: !torch.float) -> !torch.float { // Ensure no func.constant. 
// CHECK-NEXT: %[[VAL_2:.*]] = call @test_call_method(%[[RECEIVER]], %[[F]]) : (!torch.nn.Module<"c">, !torch.float) -> !torch.float // CHECK-NEXT: return %[[VAL_2]] : !torch.float -func private @test_call_indirect(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) -> !torch.float { +func.func private @test_call_indirect(%arg0: !torch.nn.Module<"c">, %arg1: !torch.float) -> !torch.float { %0 = constant @test_call_method : (!torch.nn.Module<"c">, !torch.float) -> !torch.float %1 = call_indirect %0(%arg0, %arg1) : (!torch.nn.Module<"c">, !torch.float) -> !torch.float return %1 : !torch.float diff --git a/test/Dialect/Torch/reduce-op-variants-error.mlir b/test/Dialect/Torch/reduce-op-variants-error.mlir index f08c1c40c..98c492f5b 100644 --- a/test/Dialect/Torch/reduce-op-variants-error.mlir +++ b/test/Dialect/Torch/reduce-op-variants-error.mlir @@ -2,7 +2,7 @@ // ----- -func @convert_to_value_semantic_tensors_list( %list: !torch.list) -> !torch.tensor { +func.func @convert_to_value_semantic_tensors_list( %list: !torch.list) -> !torch.tensor { %int1 = torch.constant.int 1 // expected-error@+1 {{failed to legalize operation 'torch.aten.cat' that was explicitly marked illegal}} %ret = torch.aten.cat %list, %int1 : !torch.list, !torch.int -> !torch.tensor @@ -11,7 +11,7 @@ func @convert_to_value_semantic_tensors_list( %list: !torch.list) -> !to // ----- -func @convert_to_value_semantic_tensors_optional(%tensor_optional: !torch.optional, +func.func @convert_to_value_semantic_tensors_optional(%tensor_optional: !torch.optional, %t: !torch.tensor, %training: !torch.bool, %cudnn_enable: !torch.bool, diff --git a/test/Dialect/Torch/reduce-op-variants.mlir b/test/Dialect/Torch/reduce-op-variants.mlir index a6d5e582b..192c7ed13 100644 --- a/test/Dialect/Torch/reduce-op-variants.mlir +++ b/test/Dialect/Torch/reduce-op-variants.mlir @@ -1,17 +1,17 @@ // RUN: torch-mlir-opt -torch-reduce-op-variants %s | FileCheck %s -// CHECK-LABEL: func @convert_to_value_semantic_tensors( +// CHECK-LABEL: func.func @convert_to_value_semantic_tensors( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { // CHECK: %[[OPERAND_TENSOR:.*]] = torch.copy.to_vtensor %[[ARG]] : !torch.vtensor<[],f32> // CHECK: %[[RESULT_TENSOR:.*]] = torch.aten.tanh %[[OPERAND_TENSOR]] : !torch.vtensor<[],f32> -> !torch.vtensor<[],f32> // CHECK: %[[RET:.*]] = torch.copy.to_tensor %[[RESULT_TENSOR]] : !torch.tensor<[],f32> // CHECK: return %[[RET]] : !torch.tensor<[],f32> -func @convert_to_value_semantic_tensors(%arg0: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { +func.func @convert_to_value_semantic_tensors(%arg0: !torch.tensor<[],f32>) -> !torch.tensor<[],f32> { %0 = torch.aten.tanh %arg0 : !torch.tensor<[],f32> -> !torch.tensor<[],f32> return %0 : !torch.tensor<[],f32> } -// CHECK-LABEL: func @convert_to_value_semantic_tensors_list( +// CHECK-LABEL: func.func @convert_to_value_semantic_tensors_list( // CHECK-SAME: %[[VT0:.*]]: !torch.vtensor, %[[VT1:.*]]: !torch.vtensor, // CHECK-SAME: %[[VT2:.*]]: !torch.vtensor) -> !torch.tensor { // CHECK: %[[T0:.*]] = torch.copy.to_tensor %[[VT0]] : !torch.tensor @@ -30,7 +30,7 @@ func @convert_to_value_semantic_tensors(%arg0: !torch.tensor<[],f32>) -> !torch. 
// CHECK-SAME: !torch.list, !torch.int -> !torch.vtensor // CHECK: %[[RET:.*]] = torch.copy.to_tensor %[[VRET]] : !torch.tensor // CHECK: return %[[RET]] : !torch.tensor -func @convert_to_value_semantic_tensors_list(%vt0: !torch.vtensor, %vt1: !torch.vtensor, %vt2: !torch.vtensor) -> !torch.tensor { +func.func @convert_to_value_semantic_tensors_list(%vt0: !torch.vtensor, %vt1: !torch.vtensor, %vt2: !torch.vtensor) -> !torch.tensor { %t0 = torch.copy.to_tensor %vt0 : !torch.tensor %t1 = torch.copy.to_tensor %vt1 : !torch.tensor %t2 = torch.copy.to_tensor %vt2 : !torch.tensor @@ -40,7 +40,7 @@ func @convert_to_value_semantic_tensors_list(%vt0: !torch.vtensor, %vt1: !torch. return %ret : !torch.tensor } -// CHECK-LABEL: func @convert_to_value_semantic_tensors_optional( +// CHECK-LABEL: func.func @convert_to_value_semantic_tensors_optional( // CHECK-SAME: %[[INPUT:.*]]: !torch.tensor, %[[FLOAT_TENSOR:.*]]: !torch.tensor<[4],f32>, // CHECK-SAME: %[[TRAINING:.*]]: !torch.bool, %[[CUDNN_ENABLE:.*]]: !torch.bool, // CHECK-SAME: %[[FLOAT:.*]]: !torch.float) -> !torch.tensor { @@ -67,7 +67,7 @@ func @convert_to_value_semantic_tensors_list(%vt0: !torch.vtensor, %vt1: !torch. // CHECK: %[[RET:.*]] = torch.copy.to_tensor %[[VRET]] : !torch.tensor // CHECK: return %[[RET]] : !torch.tensor // CHECK: } -func @convert_to_value_semantic_tensors_optional(%t: !torch.tensor, +func.func @convert_to_value_semantic_tensors_optional(%t: !torch.tensor, %ft: !torch.tensor<[4],f32>, %training: !torch.bool, %cudnn_enable: !torch.bool, @@ -83,7 +83,7 @@ func @convert_to_value_semantic_tensors_optional(%t: !torch.tensor, return %ret: !torch.tensor } -// CHECK-LABEL: func @reduce_trailing_underscore_inplace_variant( +// CHECK-LABEL: func.func @reduce_trailing_underscore_inplace_variant( // CHECK-SAME: %[[ARG0:.*]]: !torch.tensor<[2,2],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.tensor<[2,2],f32>) -> (!torch.tensor<[2,2],f32>, !torch.tensor<[2,2],f32>) { // CHECK: %[[C1:.*]] = torch.constant.int 1 @@ -97,23 +97,23 @@ func @convert_to_value_semantic_tensors_optional(%t: !torch.tensor, // CHECK: %[[TENSOR_AGAIN:.*]] = torch.copy.to_vtensor %[[ARRAY_RESULT]] : !torch.vtensor<[2,2],f32> // CHECK: torch.overwrite.tensor.contents %[[TENSOR_AGAIN]] overwrites %[[ARG0]] : !torch.vtensor<[2,2],f32>, !torch.tensor<[2,2],f32> // CHECK: return %[[ARG0]], %[[ARG0]] : !torch.tensor<[2,2],f32>, !torch.tensor<[2,2],f32> -func @reduce_trailing_underscore_inplace_variant(%arg0: !torch.tensor<[2,2],f32>, %arg1: !torch.tensor<[2,2],f32>) -> (!torch.tensor<[2,2],f32>, !torch.tensor<[2,2],f32>) { +func.func @reduce_trailing_underscore_inplace_variant(%arg0: !torch.tensor<[2,2],f32>, %arg1: !torch.tensor<[2,2],f32>) -> (!torch.tensor<[2,2],f32>, !torch.tensor<[2,2],f32>) { %c1 = torch.constant.int 1 %0 = torch.aten.add_.Tensor %arg0, %arg1, %c1 : !torch.tensor<[2,2],f32>, !torch.tensor<[2,2],f32>, !torch.int -> !torch.tensor<[2,2],f32> return %0, %arg0 : !torch.tensor<[2,2],f32>, !torch.tensor<[2,2],f32> } -// CHECK-LABEL: func @torch.tensor.literal() -> !torch.tensor { +// CHECK-LABEL: func.func @torch.tensor.literal() -> !torch.tensor { // CHECK: %[[VTENSOR:.*]] = torch.vtensor.literal(dense<0.000000e+00> : tensor<7xf32>) : !torch.vtensor<[7],f32> // CHECK: %[[SIZES_ERASED:.*]] = torch.tensor_static_info_cast %[[VTENSOR]] : !torch.vtensor<[7],f32> to !torch.vtensor // CHECK: %[[TENSOR:.*]] = torch.copy.to_tensor %[[SIZES_ERASED]] : !torch.tensor // CHECK: return %[[TENSOR]] : !torch.tensor -func @torch.tensor.literal() -> !torch.tensor { 
+func.func @torch.tensor.literal() -> !torch.tensor { %0 = torch.tensor.literal(dense<0.0> : tensor<7xf32>) : !torch.tensor return %0 : !torch.tensor } -// CHECK-LABEL: func @convert_to_value_semantic_tensors_optional_list( +// CHECK-LABEL: func.func @convert_to_value_semantic_tensors_optional_list( // CHECK-SAME: %[[SELF:.*]]: !torch.tensor<[5],f32>, // CHECK-SAME: %[[INDICES:.*]]: !torch.tensor<[2,3],si64>) -> !torch.tensor { // CHECK: %[[INDICES_OPTIONAL_LIST:.*]] = torch.prim.ListConstruct %[[INDICES]] : @@ -124,13 +124,13 @@ func @torch.tensor.literal() -> !torch.tensor { // CHECK: %[[VRET:.*]] = torch.aten.index.Tensor %[[SELF_VTENSOR]], %[[INDICES_LIST]] : !torch.vtensor<[5],f32>, !torch.list> -> !torch.vtensor // CHECK: %[[RET:.*]] = torch.copy.to_tensor %[[VRET]] : !torch.tensor // CHECK: return %[[RET]] : !torch.tensor -func @convert_to_value_semantic_tensors_optional_list(%self: !torch.tensor<[5],f32>, %indices: !torch.tensor<[2,3],si64>) -> !torch.tensor { +func.func @convert_to_value_semantic_tensors_optional_list(%self: !torch.tensor<[5],f32>, %indices: !torch.tensor<[2,3],si64>) -> !torch.tensor { %tensor_optional_list = torch.prim.ListConstruct %indices : (!torch.tensor<[2,3],si64>) -> !torch.list>> %ret = torch.aten.index.Tensor %self, %tensor_optional_list : !torch.tensor<[5],f32>, !torch.list>> -> !torch.tensor return %ret : !torch.tensor } -// CHECK-LABEL: func @torch.aten.uniform_( +// CHECK-LABEL: func.func @torch.aten.uniform_( // CHECK-SAME: %[[T:.*]]: !torch.tensor, %[[MIN:.*]]: !torch.float, %[[MAX:.*]]: !torch.float, // CHECK-SAME: %[[GENERATOR:.*]]: !torch.none) -> !torch.tensor { // CHECK: %[[T_VTENSOR:.*]] = torch.copy.to_vtensor %[[T]] : !torch.vtensor @@ -140,12 +140,12 @@ func @convert_to_value_semantic_tensors_optional_list(%self: !torch.tensor<[5],f // CHECK: %[[COPY_VTENSOR:.*]] = torch.copy.to_vtensor %[[RET]] : !torch.vtensor // CHECK: torch.overwrite.tensor.contents %[[COPY_VTENSOR]] overwrites %[[T]] : !torch.vtensor, !torch.tensor // CHECK: return %[[T]] : !torch.tensor -func @torch.aten.uniform_(%t: !torch.tensor, %min: !torch.float, %max: !torch.float, %generator: !torch.none) -> !torch.tensor { +func.func @torch.aten.uniform_(%t: !torch.tensor, %min: !torch.float, %max: !torch.float, %generator: !torch.none) -> !torch.tensor { %ret = torch.aten.uniform_ %t, %min, %max, %generator: !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor return %ret : !torch.tensor } -// CHECK-LABEL: func @torch.aten.bernoulli_.float( +// CHECK-LABEL: func.func @torch.aten.bernoulli_.float( // CHECK-SAME: %[[T:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[GENERATOR:.*]] = torch.constant.none // CHECK: %[[P:.*]] = torch.constant.float 5.000000e-01 @@ -155,14 +155,14 @@ func @torch.aten.uniform_(%t: !torch.tensor, %min: !torch.float, %max: !torch.fl // CHECK: %[[COPY_VTENSOR:.*]] = torch.copy.to_vtensor %[[RET]] : !torch.vtensor // CHECK: torch.overwrite.tensor.contents %[[COPY_VTENSOR]] overwrites %[[T]] : !torch.vtensor, !torch.tensor // CHECK: return %[[T]] : !torch.tensor -func @torch.aten.bernoulli_.float(%t: !torch.tensor) -> !torch.tensor { +func.func @torch.aten.bernoulli_.float(%t: !torch.tensor) -> !torch.tensor { %generator = torch.constant.none %p = torch.constant.float 5.000000e-01 %ret = torch.aten.bernoulli_.float %t, %p, %generator : !torch.tensor, !torch.float, !torch.none -> !torch.tensor return %ret : !torch.tensor } -// CHECK-LABEL: func @torch.aten.fill_.Scalar( +// CHECK-LABEL: func.func @torch.aten.fill_.Scalar( // 
CHECK-SAME: %[[T:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[VALUE:.*]] = torch.constant.int 1 // CHECK: %[[T_VTENSOR:.*]] = torch.copy.to_vtensor %[[T]] : !torch.vtensor @@ -171,13 +171,13 @@ func @torch.aten.bernoulli_.float(%t: !torch.tensor) -> !torch.tensor { // CHECK: %[[COPY_VTENSOR:.*]] = torch.copy.to_vtensor %[[RET]] : !torch.vtensor // CHECK: torch.overwrite.tensor.contents %[[COPY_VTENSOR]] overwrites %[[T]] : !torch.vtensor, !torch.tensor // CHECK: return %[[T]] : !torch.tensor -func @torch.aten.fill_.Scalar(%t: !torch.tensor) -> !torch.tensor { +func.func @torch.aten.fill_.Scalar(%t: !torch.tensor) -> !torch.tensor { %value = torch.constant.int 1 %ret = torch.aten.fill_.Scalar %t, %value : !torch.tensor, !torch.int -> !torch.tensor return %ret : !torch.tensor } -// CHECK-LABEL: func @torch.aten._index_put_impl_( +// CHECK-LABEL: func.func @torch.aten._index_put_impl_( // CHECK-SAME: %[[SELF:.*]]: !torch.tensor, %[[INDEX:.*]]: !torch.tensor, %[[VALUES:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -191,7 +191,7 @@ func @torch.aten.fill_.Scalar(%t: !torch.tensor) -> !torch.tensor { // CHECK: %[[COPY_VTENSOR:.*]] = torch.copy.to_vtensor %[[RET]] : !torch.vtensor // CHECK: torch.overwrite.tensor.contents %[[COPY_VTENSOR]] overwrites %[[SELF]] : !torch.vtensor, !torch.tensor // CHECK: return %[[SELF:.*]] : !torch.tensor -func @torch.aten._index_put_impl_(%self: !torch.tensor, %index: !torch.tensor, %values: !torch.tensor) -> !torch.tensor { +func.func @torch.aten._index_put_impl_(%self: !torch.tensor, %index: !torch.tensor, %values: !torch.tensor) -> !torch.tensor { %true = torch.constant.bool true %false = torch.constant.bool false %indicesList = torch.prim.ListConstruct %index : (!torch.tensor) -> !torch.list> @@ -199,7 +199,7 @@ func @torch.aten._index_put_impl_(%self: !torch.tensor, %index: !torch.tensor, % return %ret : !torch.tensor } -// CHECK-LABEL: func @torch.aten.copy_( +// CHECK-LABEL: func.func @torch.aten.copy_( // CHECK-SAME: %[[DST:.*]]: !torch.tensor, // CHECK-SAME: %[[SRC:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -210,7 +210,7 @@ func @torch.aten._index_put_impl_(%self: !torch.tensor, %index: !torch.tensor, % // CHECK: %[[COPY_VTENSOR:.*]] = torch.copy.to_vtensor %[[RET]] : !torch.vtensor // CHECK: torch.overwrite.tensor.contents %[[COPY_VTENSOR]] overwrites %[[DST]] : !torch.vtensor, !torch.tensor // CHECK: return %[[DST]] : !torch.tensor -func @torch.aten.copy_(%dst: !torch.tensor, %src : !torch.tensor) -> !torch.tensor { +func.func @torch.aten.copy_(%dst: !torch.tensor, %src : !torch.tensor) -> !torch.tensor { %false = torch.constant.bool false %ret = torch.aten.copy_ %dst, %src, %false : !torch.tensor, !torch.tensor, !torch.bool -> !torch.tensor return %ret : !torch.tensor diff --git a/test/Dialect/Torch/refine-public-return.mlir b/test/Dialect/Torch/refine-public-return.mlir index bb2bf180e..0cb97d1bd 100644 --- a/test/Dialect/Torch/refine-public-return.mlir +++ b/test/Dialect/Torch/refine-public-return.mlir @@ -1,34 +1,34 @@ // RUN: torch-mlir-opt -split-input-file -verify-diagnostics %s -torch-refine-public-return | FileCheck %s -// CHECK-LABEL: func @basic( +// CHECK-LABEL: func.func @basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,?],f32>) -> !torch.vtensor<[2,3,?],f32> { // CHECK: return %[[ARG]] : !torch.vtensor<[2,3,?],f32> -func @basic(%arg0: !torch.vtensor<[2,3,?],f32>) -> 
!torch.tensor { +func.func @basic(%arg0: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor { %1 = torch.copy.to_tensor %arg0 : !torch.tensor<[2,3,?],f32> %2 = torch.tensor_static_info_cast %1 : !torch.tensor<[2,3,?],f32> to !torch.tensor return %2 : !torch.tensor } -// CHECK-LABEL: func @multiple_use_non_value_tensor( +// CHECK-LABEL: func.func @multiple_use_non_value_tensor( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[NON_VALUE_TENSOR:.*]] = torch.copy.to_tensor %[[ARG0]] : !torch.tensor // CHECK: torch.overwrite.tensor.contents %[[ARG1]] overwrites %[[NON_VALUE_TENSOR]] : !torch.vtensor, !torch.tensor // CHECK: %[[RESULT:.*]] = torch.copy.to_vtensor %[[NON_VALUE_TENSOR]] : !torch.vtensor // CHECK: return %[[RESULT]] : !torch.vtensor -func @multiple_use_non_value_tensor(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.tensor { +func.func @multiple_use_non_value_tensor(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.tensor { %0 = torch.copy.to_tensor %arg0 : !torch.tensor torch.overwrite.tensor.contents %arg1 overwrites %0 : !torch.vtensor, !torch.tensor return %0 : !torch.tensor } // No conversion on private function. -// CHECK-LABEL: func private @basic_private( +// CHECK-LABEL: func.func private @basic_private( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor { // CHECK: %[[COPIED:.*]] = torch.copy.to_tensor %[[ARG]] : !torch.tensor<[2,3,?],f32> // CHECK: %[[CASTED:.*]] = torch.tensor_static_info_cast %[[COPIED]] : !torch.tensor<[2,3,?],f32> to !torch.tensor // CHECK: return %[[CASTED]] : !torch.tensor -func private @basic_private(%arg0: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor { +func.func private @basic_private(%arg0: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor { %1 = torch.copy.to_tensor %arg0 : !torch.tensor<[2,3,?],f32> %2 = torch.tensor_static_info_cast %1 : !torch.tensor<[2,3,?],f32> to !torch.tensor return %2 : !torch.tensor @@ -38,11 +38,11 @@ func private @basic_private(%arg0: !torch.vtensor<[2,3,?],f32>) -> !torch.tensor // Call to public function. // expected-error @+1 {{unimplemented}} -func @called(%arg0: tensor<*xf32>) -> tensor<*xf32> { +func.func @called(%arg0: tensor<*xf32>) -> tensor<*xf32> { return %arg0 : tensor<*xf32> } -func private @caller(%arg0: tensor<*xf32>) -> tensor<*xf32> { +func.func private @caller(%arg0: tensor<*xf32>) -> tensor<*xf32> { %0 = call @called(%arg0) : (tensor<*xf32>) -> tensor<*xf32> return %0 : tensor<*xf32> } @@ -51,7 +51,7 @@ func private @caller(%arg0: tensor<*xf32>) -> tensor<*xf32> { // Multiple returns. 
// expected-error @+1 {{unimplemented}} -func @called(%arg0: tensor<*xf32>) -> tensor<*xf32> { +func.func @called(%arg0: tensor<*xf32>) -> tensor<*xf32> { %ctrue = arith.constant true cf.cond_br %ctrue, ^bb1, ^bb2 ^bb1: diff --git a/test/Dialect/Torch/refine-types-branch.mlir b/test/Dialect/Torch/refine-types-branch.mlir index c0bcb93b6..765e2508d 100644 --- a/test/Dialect/Torch/refine-types-branch.mlir +++ b/test/Dialect/Torch/refine-types-branch.mlir @@ -2,7 +2,7 @@ // ----- -// CHECK-LABEL: func @prim.if$branch_merge_type_tensor( +// CHECK-LABEL: func.func @prim.if$branch_merge_type_tensor( // CHECK-SAME: %[[PRED:.*]]: !torch.bool, // CHECK-SAME: %[[T1:.*]]: !torch.tensor, // CHECK-SAME: %[[T2:.*]]: !torch.tensor) -> !torch.bool { @@ -18,7 +18,7 @@ // CHECK: %[[RET:.*]] = torch.aten.__isnot__ %[[REFINED]], %[[NONE]] : !torch.tensor, !torch.none -> !torch.bool // CHECK: return %[[RET]] : !torch.bool -func @prim.if$branch_merge_type_tensor(%pred: !torch.bool, %t0: !torch.tensor, %t1: !torch.tensor) -> !torch.bool { +func.func @prim.if$branch_merge_type_tensor(%pred: !torch.bool, %t0: !torch.tensor, %t1: !torch.tensor) -> !torch.bool { %res = torch.prim.If %pred -> (!torch.optional) { %optional0 = torch.derefine %t0: !torch.tensor to !torch.optional torch.prim.If.yield %optional0: !torch.optional @@ -33,7 +33,7 @@ func @prim.if$branch_merge_type_tensor(%pred: !torch.bool, %t0: !torch.tensor, % // ----- -// CHECK-LABEL: func @prim.if$branch_merge_type_optional( +// CHECK-LABEL: func.func @prim.if$branch_merge_type_optional( // CHECK-SAME: %[[PRED:.*]]: !torch.bool, // CHECK-SAME: %[[T:.*]]: !torch.tensor) -> !torch.optional { // CHECK: %[[MERGED:.*]] = torch.prim.If %[[PRED]] -> (!torch.optional) { @@ -46,7 +46,7 @@ func @prim.if$branch_merge_type_tensor(%pred: !torch.bool, %t0: !torch.tensor, % // CHECK: } // CHECK: return %[[MERGED:.*]] : !torch.optional -func @prim.if$branch_merge_type_optional(%pred: !torch.bool, %t1: !torch.tensor) -> !torch.optional { +func.func @prim.if$branch_merge_type_optional(%pred: !torch.bool, %t1: !torch.tensor) -> !torch.optional { %res = torch.prim.If %pred -> (!torch.optional) { %none = torch.constant.none %optional0 = torch.derefine %none: !torch.none to !torch.optional @@ -60,7 +60,7 @@ func @prim.if$branch_merge_type_optional(%pred: !torch.bool, %t1: !torch.tensor) // ----- -// CHECK-LABEL: func @prim.if$refined_type_conflicting( +// CHECK-LABEL: func.func @prim.if$refined_type_conflicting( // CHECK-SAME: %[[NONE:.*]]: !torch.none) -> !torch.tensor { // CHECK: %[[OPTIONAL:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional // CHECK: %[[NOT_NONE:.*]] = torch.aten.__isnot__ %[[NONE]], %[[NONE]] : !torch.none, !torch.none -> !torch.bool @@ -73,7 +73,7 @@ func @prim.if$branch_merge_type_optional(%pred: !torch.bool, %t1: !torch.tensor) // CHECK: } // CHECK: return %[[PRED:.*]] : !torch.tensor -func @prim.if$refined_type_conflicting(%none: !torch.none) -> !torch.tensor { +func.func @prim.if$refined_type_conflicting(%none: !torch.none) -> !torch.tensor { %optional = torch.derefine %none: !torch.none to !torch.optional %pred = torch.aten.__isnot__ %optional, %none : !torch.optional, !torch.none -> !torch.bool %res = torch.prim.If %pred -> (!torch.tensor) { @@ -88,7 +88,7 @@ func @prim.if$refined_type_conflicting(%none: !torch.none) -> !torch.tensor { // ----- -// CHECK-LABEL: func @prim.loop$region_arg_to_internal( +// CHECK-LABEL: func.func @prim.loop$region_arg_to_internal( // CHECK-SAME: %[[ARG_NONE:.*]]: !torch.none) -> !torch.optional { // 
CHECK: %[[INT10:.*]] = torch.constant.int 10 // CHECK: %[[INDV:.*]] = torch.constant.int 0 @@ -105,7 +105,7 @@ func @prim.if$refined_type_conflicting(%none: !torch.none) -> !torch.tensor { // CHECK: %[[OPTIONAL:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional // CHECK: return %[[OPTIONAL]] : !torch.optional -func @prim.loop$region_arg_to_internal(%none: !torch.none) -> !torch.optional { +func.func @prim.loop$region_arg_to_internal(%none: !torch.none) -> !torch.optional { %int10 = torch.constant.int 10 %int0 = torch.constant.int 0 %true = torch.constant.bool true @@ -120,11 +120,11 @@ func @prim.loop$region_arg_to_internal(%none: !torch.none) -> !torch.optional !torch.vtensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[ATEN]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @f(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { +func.func @f(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { %cast = torch.tensor_static_info_cast %arg0 : !torch.vtensor<*,f32> to !torch.vtensor cf.br ^bb1(%cast: !torch.vtensor) ^bb1(%arg1: !torch.vtensor): @@ -134,16 +134,16 @@ func @f(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { // ----- -// CHECK-LABEL: func @f -// CHECK: func private @callee +// CHECK-LABEL: func.func @f +// CHECK: func.func private @callee // CHECK-NEXT: torch.aten.tanh %{{.*}} : !torch.vtensor -> !torch.vtensor<*,f32> -func @f() { +func.func @f() { builtin.module { - func private @callee(%arg0: !torch.vtensor) { + func.func private @callee(%arg0: !torch.vtensor) { %1 = torch.aten.tanh %arg0 : !torch.vtensor -> !torch.vtensor return } - func @caller(%arg0: !torch.vtensor<*,f32>) { + func.func @caller(%arg0: !torch.vtensor<*,f32>) { %cast = torch.tensor_static_info_cast %arg0 : !torch.vtensor<*,f32> to !torch.vtensor call @callee(%cast) : (!torch.vtensor) -> () return diff --git a/test/Dialect/Torch/refine-types-ops.mlir b/test/Dialect/Torch/refine-types-ops.mlir index a1d32fdb5..261b0ecc8 100644 --- a/test/Dialect/Torch/refine-types-ops.mlir +++ b/test/Dialect/Torch/refine-types-ops.mlir @@ -4,7 +4,7 @@ // function (i.e. new code called from visitOperation). 
// ----- -// CHECK-LABEL: func @aten.arange.start$int64_dtype( +// CHECK-LABEL: func.func @aten.arange.start$int64_dtype( // CHECK-SAME: %[[START:.*]]: !torch.int, // CHECK-SAME: %[[END:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[NONE:.*]] = torch.constant.none @@ -14,14 +14,14 @@ // CHECK-SAME: -> !torch.vtensor<*,si64> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[T]] : !torch.vtensor<*,si64> to !torch.vtensor // CHECK: return %[[RET]] : !torch.vtensor -func @aten.arange.start$int64_dtype(%start: !torch.int, %end: !torch.int) -> !torch.vtensor { +func.func @aten.arange.start$int64_dtype(%start: !torch.int, %end: !torch.int) -> !torch.vtensor { %none = torch.constant.none %ret = torch.aten.arange.start %start, %end, %none, %none, %none, %none: !torch.int, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor return %ret : !torch.vtensor } // ----- -// CHECK-LABEL: func @aten.arange.start$float32_dtype( +// CHECK-LABEL: func.func @aten.arange.start$float32_dtype( // CHECK-SAME: %[[START:.*]]: !torch.float, // CHECK-SAME: %[[END:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[NONE:.*]] = torch.constant.none @@ -31,14 +31,14 @@ func @aten.arange.start$int64_dtype(%start: !torch.int, %end: !torch.int) -> !to // CHECK-SAME: -> !torch.vtensor<*,f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[T]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: return %[[RET]] : !torch.vtensor -func @aten.arange.start$float32_dtype(%start: !torch.float, %end: !torch.int) -> !torch.vtensor { +func.func @aten.arange.start$float32_dtype(%start: !torch.float, %end: !torch.int) -> !torch.vtensor { %none = torch.constant.none %ret = torch.aten.arange.start %start, %end, %none, %none, %none, %none: !torch.float, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor return %ret : !torch.vtensor } // ----- -// CHECK-LABEL: func @aten.arange.start$specified_dtype( +// CHECK-LABEL: func.func @aten.arange.start$specified_dtype( // CHECK-SAME: %[[END:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[CST6:.*]] = torch.constant.int 6 // CHECK: %[[NONE:.*]] = torch.constant.none @@ -48,7 +48,7 @@ func @aten.arange.start$float32_dtype(%start: !torch.float, %end: !torch.int) -> // CHECK-SAME: -> !torch.vtensor<*,f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[T]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: return %[[RET]] : !torch.vtensor -func @aten.arange.start$specified_dtype(%end: !torch.int) -> !torch.vtensor { +func.func @aten.arange.start$specified_dtype(%end: !torch.int) -> !torch.vtensor { %int6 = torch.constant.int 6 %none = torch.constant.none %ret = torch.aten.arange %end, %int6, %none, %none, %none: !torch.int, !torch.int, !torch.none, !torch.none, !torch.none -> !torch.vtensor @@ -56,20 +56,20 @@ func @aten.arange.start$specified_dtype(%end: !torch.int) -> !torch.vtensor { } // ----- -// CHECK-LABEL: func @torch.aten.linear( +// CHECK-LABEL: func.func @torch.aten.linear( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,3],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[5,3],f32>, // CHECK-SAME: %[[ARG2:.*]]: !torch.vtensor<[5],f32>) -> !torch.vtensor { // CHECK: %[[LINEAR:.*]] = torch.aten.linear %[[ARG0]], %[[ARG1]], %[[ARG2]] : !torch.vtensor<[?,3],f32>, !torch.vtensor<[5,3],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<*,f32> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[LINEAR]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: return %[[RESULT]] : !torch.vtensor -func 
@torch.aten.linear(%arg0: !torch.vtensor<[?,3],f32>, %arg1: !torch.vtensor<[5,3],f32>, %arg2: !torch.vtensor<[5],f32>) -> !torch.vtensor { +func.func @torch.aten.linear(%arg0: !torch.vtensor<[?,3],f32>, %arg1: !torch.vtensor<[5,3],f32>, %arg2: !torch.vtensor<[5],f32>) -> !torch.vtensor { %1 = torch.aten.linear %arg0, %arg1, %arg2 : !torch.vtensor<[?,3],f32>, !torch.vtensor<[5,3],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor return %1 : !torch.vtensor } // ----- -// CHECK-LABEL: func @aten.sum.dim_IntList( +// CHECK-LABEL: func.func @aten.sum.dim_IntList( // CHECK-SAME: %[[T:.*]]: !torch.vtensor<*,si64>) -> !torch.vtensor { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[NONE:.*]] = torch.constant.none @@ -82,7 +82,7 @@ func @torch.aten.linear(%arg0: !torch.vtensor<[?,3],f32>, %arg1: !torch.vtensor< // CHECK-SAME: -> !torch.vtensor<*,si64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.vtensor<*,si64> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @aten.sum.dim_IntList(%t: !torch.vtensor<*,si64>) -> !torch.vtensor { +func.func @aten.sum.dim_IntList(%t: !torch.vtensor<*,si64>) -> !torch.vtensor { %false = torch.constant.bool false %none = torch.constant.none %int0 = torch.constant.int 0 @@ -93,14 +93,14 @@ func @aten.sum.dim_IntList(%t: !torch.vtensor<*,si64>) -> !torch.vtensor { } // ----- -// CHECK-LABEL: func @aten.any.dim( +// CHECK-LABEL: func.func @aten.any.dim( // CHECK-SAME: %[[T:.*]]: !torch.vtensor<*,i1>) -> !torch.vtensor { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[INT_NEG1:.*]] = torch.constant.int -1 // CHECK: %[[RET:.*]] = torch.aten.any.dim %[[T]], %[[INT_NEG1]], %[[FALSE]] : !torch.vtensor<*,i1>, !torch.int, !torch.bool -> !torch.vtensor<*,i1> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.vtensor<*,i1> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @aten.any.dim(%t: !torch.vtensor<*,i1>) -> !torch.vtensor { +func.func @aten.any.dim(%t: !torch.vtensor<*,i1>) -> !torch.vtensor { %false = torch.constant.bool false %int-1 = torch.constant.int -1 %ret = torch.aten.any.dim %t, %int-1, %false : !torch.vtensor<*,i1>, !torch.int, !torch.bool -> !torch.vtensor @@ -108,18 +108,18 @@ func @aten.any.dim(%t: !torch.vtensor<*,i1>) -> !torch.vtensor { } // ----- -// CHECK-LABEL: func @aten.any( +// CHECK-LABEL: func.func @aten.any( // CHECK-SAME: %[[T:.*]]: !torch.vtensor<*,i1>) -> !torch.vtensor { // CHECK: %[[RET:.*]] = torch.aten.any %[[T]] : !torch.vtensor<*,i1> -> !torch.vtensor<*,i1> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.vtensor<*,i1> to !torch.vtensor // CHECK: return %[[CAST]] : !torch.vtensor -func @aten.any(%t: !torch.vtensor<*,i1>) -> !torch.vtensor { +func.func @aten.any(%t: !torch.vtensor<*,i1>) -> !torch.vtensor { %ret = torch.aten.any %t: !torch.vtensor<*,i1> -> !torch.vtensor return %ret : !torch.vtensor } // ----- -// CHECK-LABEL: func @torch.aten.zeros( +// CHECK-LABEL: func.func @torch.aten.zeros( // CHECK-SAME: %[[DIM0:.*]]: !torch.int) -> !torch.tensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[INT2:.*]] = torch.constant.int 2 @@ -127,7 +127,7 @@ func @aten.any(%t: !torch.vtensor<*,i1>) -> !torch.vtensor { // CHECK: %[[ZEROS:.*]] = torch.aten.zeros %[[SIZES]], %[[NONE]], %[[NONE]], %[[NONE]], %[[NONE]] : !torch.list, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[ZEROS]] : !torch.tensor<*,f32> to 
!torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.zeros(%dim0: !torch.int) -> !torch.tensor { +func.func @torch.aten.zeros(%dim0: !torch.int) -> !torch.tensor { %none = torch.constant.none %int2 = torch.constant.int 2 %sizesList = torch.prim.ListConstruct %dim0, %int2 : (!torch.int, !torch.int) -> !torch.list @@ -136,19 +136,19 @@ func @torch.aten.zeros(%dim0: !torch.int) -> !torch.tensor { } // ----- -// CHECK-LABEL: func @torch.aten.type_as( +// CHECK-LABEL: func.func @torch.aten.type_as( // CHECK-SAME: %[[INPUT:.*]]: !torch.tensor<[?],si64>, // CHECK-SAME: %[[OTHER:.*]]: !torch.tensor<[?,2],f32>) -> !torch.tensor { // CHECK: %[[RET:.*]] = torch.aten.type_as %[[INPUT]], %[[OTHER]] : !torch.tensor<[?],si64>, !torch.tensor<[?,2],f32> -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.type_as(%self: !torch.tensor<[?], si64>, %other: !torch.tensor<[?,2],f32>) -> !torch.tensor { +func.func @torch.aten.type_as(%self: !torch.tensor<[?], si64>, %other: !torch.tensor<[?,2],f32>) -> !torch.tensor { %ret = torch.aten.type_as %self, %other : !torch.tensor<[?], si64>, !torch.tensor<[?,2],f32> -> !torch.tensor return %ret: !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.cat( +// CHECK-LABEL: func.func @torch.aten.cat( // CHECK-SAME: %[[T1:.*]]: !torch.tensor<[?,1,4],f32>, // CHECK-SAME: %[[T2:.*]]: !torch.tensor<[2,3,4],f32>) -> !torch.tensor { // CHECK: %[[INT1:.*]] = torch.constant.int 1 @@ -156,7 +156,7 @@ func @torch.aten.type_as(%self: !torch.tensor<[?], si64>, %other: !torch.tensor< // CHECK: %[[RET:.*]] = torch.aten.cat %[[TENSORS]], %[[INT1]] : !torch.list, !torch.int -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.cat(%t0: !torch.tensor<[?,1,4], f32>, %t1: !torch.tensor<[2,3,4], f32>) -> !torch.tensor { +func.func @torch.aten.cat(%t0: !torch.tensor<[?,1,4], f32>, %t1: !torch.tensor<[2,3,4], f32>) -> !torch.tensor { %int1 = torch.constant.int 1 %tensorList = torch.prim.ListConstruct %t0, %t1: (!torch.tensor<[?,1,4], f32>, !torch.tensor<[2,3,4], f32>) -> !torch.list %ret = torch.aten.cat %tensorList, %int1 : !torch.list, !torch.int -> !torch.tensor @@ -164,29 +164,29 @@ func @torch.aten.cat(%t0: !torch.tensor<[?,1,4], f32>, %t1: !torch.tensor<[2,3,4 } // ----- -// CHECK-LABEL: func @torch.aten._shape_as_tensor( +// CHECK-LABEL: func.func @torch.aten._shape_as_tensor( // CHECK-SAME: %[[INPUT:.*]]: !torch.tensor<[?,1,4],f32>) -> !torch.tensor { // CHECK: %[[RET:.*]] = torch.aten._shape_as_tensor %[[INPUT]] : !torch.tensor<[?,1,4],f32> -> !torch.tensor<*,si64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,si64> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten._shape_as_tensor(%input: !torch.tensor<[?,1,4], f32>) -> !torch.tensor { +func.func @torch.aten._shape_as_tensor(%input: !torch.tensor<[?,1,4], f32>) -> !torch.tensor { %ret= torch.aten._shape_as_tensor %input : !torch.tensor<[?,1,4], f32> -> !torch.tensor return %ret : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten._shape_as_tensor$unknown_input_shape( +// CHECK-LABEL: func.func @torch.aten._shape_as_tensor$unknown_input_shape( // CHECK-SAME: %[[INPUT:.*]]: !torch.tensor) -> !torch.tensor { // CHECK: %[[RET:.*]] = torch.aten._shape_as_tensor %[[INPUT]] : 
!torch.tensor -> !torch.tensor<*,si64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,si64> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten._shape_as_tensor$unknown_input_shape(%input: !torch.tensor) -> !torch.tensor { +func.func @torch.aten._shape_as_tensor$unknown_input_shape(%input: !torch.tensor) -> !torch.tensor { %ret= torch.aten._shape_as_tensor %input : !torch.tensor -> !torch.tensor return %ret : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.embedding( +// CHECK-LABEL: func.func @torch.aten.embedding( // CHECK-SAME: %[[INPUT:.*]]: !torch.tensor<[104,512],f32>, // CHECK-SAME: %[[INDEXES:.*]]: !torch.tensor<[2,3],si64>) -> !torch.tensor { // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -194,7 +194,7 @@ func @torch.aten._shape_as_tensor$unknown_input_shape(%input: !torch.tensor) -> // CHECK: %[[RET:.*]] = torch.aten.embedding %[[INPUT]], %[[INDEXES]], %[[PADDING_IDX]], %[[FALSE]], %[[FALSE]] : !torch.tensor<[104,512],f32>, !torch.tensor<[2,3],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.embedding(%weight: !torch.tensor<[104,512],f32>, %indices: !torch.tensor<[2,3], si64>) -> !torch.tensor { +func.func @torch.aten.embedding(%weight: !torch.tensor<[104,512],f32>, %indices: !torch.tensor<[2,3], si64>) -> !torch.tensor { %false = torch.constant.bool false %int1 = torch.constant.int 1 %ret = torch.aten.embedding %weight, %indices, %int1, %false, %false : !torch.tensor<[104,512],f32>, !torch.tensor<[2,3], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.tensor @@ -202,14 +202,14 @@ func @torch.aten.embedding(%weight: !torch.tensor<[104,512],f32>, %indices: !tor } // ----- -// CHECK-LABEL: func @torch.aten.tensor.float( +// CHECK-LABEL: func.func @torch.aten.tensor.float( // CHECK-SAME: %[[t:.*]]: !torch.float) -> !torch.tensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[RET:.*]] = torch.aten.tensor.float %[[t]], %[[NONE]], %[[NONE]], %[[FALSE]] : !torch.float, !torch.none, !torch.none, !torch.bool -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.tensor.float(%t: !torch.float) -> !torch.tensor { +func.func @torch.aten.tensor.float(%t: !torch.float) -> !torch.tensor { %none = torch.constant.none %false = torch.constant.bool false %ret = torch.aten.tensor.float %t, %none, %none, %false : !torch.float, !torch.none, !torch.none, !torch.bool -> !torch.tensor @@ -217,7 +217,7 @@ func @torch.aten.tensor.float(%t: !torch.float) -> !torch.tensor { } // ----- -// CHECK-LABEL: func @torch.aten.tensor.float$specified_dtype( +// CHECK-LABEL: func.func @torch.aten.tensor.float$specified_dtype( // CHECK-SAME: %[[t:.*]]: !torch.float) -> !torch.tensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[CST11:.*]] = torch.constant.int 11 @@ -225,7 +225,7 @@ func @torch.aten.tensor.float(%t: !torch.float) -> !torch.tensor { // CHECK: %[[RET:.*]] = torch.aten.tensor.float %[[t]], %[[CST11]], %[[NONE]], %[[FALSE]] : !torch.float, !torch.int, !torch.none, !torch.bool -> !torch.tensor<*,i1> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,i1> to !torch.tensor // CHECK: return %[[CAST]] : 
!torch.tensor -func @torch.aten.tensor.float$specified_dtype(%t: !torch.float) -> !torch.tensor { +func.func @torch.aten.tensor.float$specified_dtype(%t: !torch.float) -> !torch.tensor { %none = torch.constant.none %int11 = torch.constant.int 11 %false = torch.constant.bool false @@ -234,59 +234,59 @@ func @torch.aten.tensor.float$specified_dtype(%t: !torch.float) -> !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.softmax.int( +// CHECK-LABEL: func.func @torch.aten.softmax.int( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>, // CHECK-SAME: %[[DIM:.*]]: !torch.int) -> !torch.tensor { // CHECK: %[[DTYPE:.*]] = torch.constant.none // CHECK: %[[SOFTMAX:.*]] = torch.aten.softmax.int %[[T]], %[[DIM]], %[[DTYPE]] : !torch.tensor<[2,3],f32>, !torch.int, !torch.none -> !torch.tensor<*,f32> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[SOFTMAX]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[RET]] : !torch.tensor -func @torch.aten.softmax.int(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> !torch.tensor { +func.func @torch.aten.softmax.int(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> !torch.tensor { %none = torch.constant.none %ret = torch.aten.softmax.int %t, %dim, %none : !torch.tensor<[2,3],f32>, !torch.int, !torch.none -> !torch.tensor return %ret : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.softmax.int$specified_dtype( +// CHECK-LABEL: func.func @torch.aten.softmax.int$specified_dtype( // CHECK-SAME: %[[T:.*]]: !torch.tensor<[2,3],f32>, // CHECK-SAME: %[[DIM:.*]]: !torch.int) -> !torch.tensor { // CHECK: %[[DTYPE:.*]] = torch.constant.int 4 // CHECK: %[[SOFTMAX:.*]] = torch.aten.softmax.int %[[T]], %[[DIM]], %[[DTYPE]] : !torch.tensor<[2,3],f32>, !torch.int, !torch.int -> !torch.tensor<*,si64> // CHECK: %[[RET:.*]] = torch.tensor_static_info_cast %[[SOFTMAX]] : !torch.tensor<*,si64> to !torch.tensor // CHECK: return %[[RET]] : !torch.tensor -func @torch.aten.softmax.int$specified_dtype(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> !torch.tensor { +func.func @torch.aten.softmax.int$specified_dtype(%t: !torch.tensor<[2,3],f32>, %dim: !torch.int) -> !torch.tensor { %int4 = torch.constant.int 4 %ret = torch.aten.softmax.int %t, %dim, %int4: !torch.tensor<[2,3],f32>, !torch.int, !torch.int -> !torch.tensor return %ret : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.Matmul.Broadcast.Matrix( +// CHECK-LABEL: func.func @torch.aten.Matmul.Broadcast.Matrix( // CHECK-SAME: %[[LHS:.*]]: !torch.vtensor<*,f32>, // CHECK-SAME: %[[RHS:.*]]: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { // CHECK: %[[MUL:.*]] = torch.aten.matmul %[[LHS]], %[[RHS]] : !torch.vtensor<*,f32>, !torch.vtensor<[?,?,?],f32> -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[MUL]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.Matmul.Broadcast.Matrix(%arg0: !torch.vtensor<*,f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { +func.func @torch.aten.Matmul.Broadcast.Matrix(%arg0: !torch.vtensor<*,f32>, %arg1: !torch.vtensor<[?,?,?],f32>) -> !torch.tensor { %0 = torch.aten.matmul %arg0, %arg1 : !torch.vtensor<*,f32>, !torch.vtensor<[?,?,?],f32> -> !torch.tensor return %0 : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.Matmul.Broadcast.Vector( +// CHECK-LABEL: func.func @torch.aten.Matmul.Broadcast.Vector( // CHECK-SAME: %[[LHS:.*]]: !torch.vtensor<*,f32>, // CHECK-SAME: %[[RHS:.*]]: !torch.vtensor<*,f32>) -> !torch.tensor { // CHECK: %[[MUL:.*]] = 
torch.aten.matmul %[[LHS]], %[[RHS]] : !torch.vtensor<*,f32>, !torch.vtensor<*,f32> -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[MUL]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.Matmul.Broadcast.Vector(%arg0: !torch.vtensor<*,f32>, %arg1: !torch.vtensor<*,f32>) -> !torch.tensor { +func.func @torch.aten.Matmul.Broadcast.Vector(%arg0: !torch.vtensor<*,f32>, %arg1: !torch.vtensor<*,f32>) -> !torch.tensor { %0 = torch.aten.matmul %arg0, %arg1 : !torch.vtensor<*,f32>, !torch.vtensor<*,f32> -> !torch.tensor return %0 : !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.to.dtype( +// CHECK-LABEL: func.func @torch.aten.to.dtype( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor<[?,?],f32>) -> !torch.tensor // CHECK: %[[TODTYPE:.*]] = torch.aten.to.dtype // CHECK-SAME: %[[ARG]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : @@ -294,7 +294,7 @@ func @torch.aten.Matmul.Broadcast.Vector(%arg0: !torch.vtensor<*,f32>, %arg1: !t // CHECK-SAME: -> !torch.tensor<*,si64> // CHECK-NEXT: %[[RES:.*]] = torch.tensor_static_info_cast %[[TODTYPE]] : !torch.tensor<*,si64> to !torch.tensor // CHECK-NEXT: return %[[RES]] : !torch.tensor -func @torch.aten.to.dtype(%arg0: !torch.tensor<[?,?],f32>) -> !torch.tensor{ +func.func @torch.aten.to.dtype(%arg0: !torch.tensor<[?,?],f32>) -> !torch.tensor{ %none = torch.constant.none %false = torch.constant.bool false %int4 = torch.constant.int 4 @@ -303,18 +303,18 @@ func @torch.aten.to.dtype(%arg0: !torch.tensor<[?,?],f32>) -> !torch.tensor{ } // ----- -// CHECK-LABEL: func @torch.prim.NumToTensor.Scalar( +// CHECK-LABEL: func.func @torch.prim.NumToTensor.Scalar( // CHECK-SAME: %[[SELF:.*]]: !torch.int) -> !torch.tensor { // CHECK: %[[NTT:.*]] = torch.prim.NumToTensor.Scalar %[[SELF]] : !torch.int -> !torch.tensor<*,si64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[NTT]] : !torch.tensor<*,si64> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.prim.NumToTensor.Scalar(%arg0: !torch.int) -> !torch.tensor { +func.func @torch.prim.NumToTensor.Scalar(%arg0: !torch.int) -> !torch.tensor { %0 = torch.prim.NumToTensor.Scalar %arg0: !torch.int -> !torch.tensor return %0: !torch.tensor } // ----- -// CHECK-LABEL: func @torch.aten.tensor( +// CHECK-LABEL: func.func @torch.aten.tensor( // CHECK-SAME: %[[DATA:.*]]: !torch.list>) -> !torch.tensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[FALSE:.*]] = torch.constant.bool false @@ -323,7 +323,7 @@ func @torch.prim.NumToTensor.Scalar(%arg0: !torch.int) -> !torch.tensor { // CHECK-SAME: -> !torch.tensor<*,f32> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,f32> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.tensor(%t: !torch.list>) -> !torch.tensor { +func.func @torch.aten.tensor(%t: !torch.list>) -> !torch.tensor { %none = torch.constant.none %false = torch.constant.bool false %ret = torch.aten.tensor %t, %none, %none, %false : !torch.list>, !torch.none, !torch.none, !torch.bool -> !torch.tensor @@ -331,7 +331,7 @@ func @torch.aten.tensor(%t: !torch.list>) -> !torch.tensor { } // ----- -// CHECK-LABEL: func @torch.aten.tensor$specified_dtype( +// CHECK-LABEL: func.func @torch.aten.tensor$specified_dtype( // CHECK-SAME: %[[DATA:.*]]: !torch.list>) -> !torch.tensor { // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[INT4:.*]] = torch.constant.int 4 @@ -339,7 +339,7 @@ func @torch.aten.tensor(%t: !torch.list>) -> !torch.tensor { // 
CHECK: %[[RET:.*]] = torch.aten.tensor %[[DATA]], %[[INT4]], %[[NONE]], %[[FALSE]] : !torch.list>, !torch.int, !torch.none, !torch.bool -> !torch.tensor<*,si64> // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[RET]] : !torch.tensor<*,si64> to !torch.tensor // CHECK: return %[[CAST]] : !torch.tensor -func @torch.aten.tensor$specified_dtype(%t: !torch.list>) -> !torch.tensor { +func.func @torch.aten.tensor$specified_dtype(%t: !torch.list>) -> !torch.tensor { %none = torch.constant.none %int4 = torch.constant.int 4 %false = torch.constant.bool false diff --git a/test/Dialect/Torch/refine-types.mlir b/test/Dialect/Torch/refine-types.mlir index c089a7a11..eadefbfa6 100644 --- a/test/Dialect/Torch/refine-types.mlir +++ b/test/Dialect/Torch/refine-types.mlir @@ -7,35 +7,35 @@ // should go in refine-types-ops.mlir. // ----- -// CHECK-LABEL: func @basic( +// CHECK-LABEL: func.func @basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<*,f32>) -> !torch.vtensor { // CHECK: %[[TANH:.*]] = torch.aten.tanh %[[ARG0]] : !torch.vtensor<*,f32> -> !torch.vtensor<*,f32> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[TANH]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: return %[[RESULT]] : !torch.vtensor -func @basic(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { +func.func @basic(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { %1 = torch.aten.tanh %arg0 : !torch.vtensor<*,f32> -> !torch.vtensor return %1 : !torch.vtensor } // ----- -// CHECK-LABEL: func @keep_existing_shape_information( +// CHECK-LABEL: func.func @keep_existing_shape_information( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<*,f32>) -> !torch.vtensor<[2],f32> { // CHECK: %[[TANH:.*]] = torch.aten.tanh %[[ARG0]] : !torch.vtensor<*,f32> -> !torch.vtensor<[2],f32> // CHECK: return %[[TANH]] : !torch.vtensor<[2],f32> -func @keep_existing_shape_information(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor<[2],f32> { +func.func @keep_existing_shape_information(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor<[2],f32> { %1 = torch.aten.tanh %arg0 : !torch.vtensor<*,f32> -> !torch.vtensor<[2], f32> return %1 : !torch.vtensor<[2],f32> } // ----- -// CHECK-LABEL: func @propagate_through_multiple_ops( +// CHECK-LABEL: func.func @propagate_through_multiple_ops( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<*,f32>) -> !torch.vtensor { // CHECK: %[[TANH0:.*]] = torch.aten.tanh %[[ARG0]] : !torch.vtensor<*,f32> -> !torch.vtensor<*,f32> // CHECK: %[[TANH1:.*]] = torch.aten.tanh %[[TANH0]] : !torch.vtensor<*,f32> -> !torch.vtensor<*,f32> // CHECK: %[[TANH2:.*]] = torch.aten.tanh %[[TANH1]] : !torch.vtensor<*,f32> -> !torch.vtensor<*,f32> // CHECK: %[[TANH3:.*]] = torch.tensor_static_info_cast %[[TANH2]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: return %[[TANH3]] : !torch.vtensor -func @propagate_through_multiple_ops(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { +func.func @propagate_through_multiple_ops(%arg0: !torch.vtensor<*,f32>) -> !torch.vtensor { %1 = torch.aten.tanh %arg0 : !torch.vtensor<*,f32> -> !torch.vtensor %2 = torch.aten.tanh %1 : !torch.vtensor -> !torch.vtensor %3 = torch.aten.tanh %2 : !torch.vtensor -> !torch.vtensor @@ -45,108 +45,108 @@ func @propagate_through_multiple_ops(%arg0: !torch.vtensor<*,f32>) -> !torch.vte // ----- // Check rewriting logic in case of mixes of users that do/don't allow type // refinement. 
-// CHECK-LABEL: func @mixed_allowing_not_allowing_type_refinement( +// CHECK-LABEL: func.func @mixed_allowing_not_allowing_type_refinement( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<*,f32>) -> (!torch.vtensor, !torch.vtensor) { // CHECK: %[[TANH0:.*]] = torch.aten.tanh %[[ARG0]] : !torch.vtensor<*,f32> -> !torch.vtensor<*,f32> // CHECK: %[[ERASED:.*]] = torch.tensor_static_info_cast %[[TANH0]] : !torch.vtensor<*,f32> to !torch.vtensor // CHECK: %[[TANH1:.*]] = torch.aten.tanh %[[TANH0]] : !torch.vtensor<*,f32> -> !torch.vtensor<*,f32> // CHECK: return %[[ERASED]], %[[ERASED]] : !torch.vtensor, !torch.vtensor -func @mixed_allowing_not_allowing_type_refinement(%arg0: !torch.vtensor<*,f32>) -> (!torch.vtensor, !torch.vtensor) { +func.func @mixed_allowing_not_allowing_type_refinement(%arg0: !torch.vtensor<*,f32>) -> (!torch.vtensor, !torch.vtensor) { %1 = torch.aten.tanh %arg0 : !torch.vtensor<*,f32> -> !torch.vtensor %3 = torch.aten.tanh %1 : !torch.vtensor -> !torch.vtensor return %1, %1 : !torch.vtensor, !torch.vtensor } // ----- -// CHECK-LABEL: func @type_promotion$same_category_different_width( +// CHECK-LABEL: func.func @type_promotion$same_category_different_width( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?],si32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],unk> { // CHECK: %[[ALPHA:.*]] = torch.constant.int 3 // CHECK: %[[ADD:.*]] = torch.aten.add.Tensor %[[ARG0]], %[[ARG1]], %[[ALPHA]] : !torch.vtensor<[?],si32>, !torch.vtensor<[?],si64>, !torch.int -> !torch.vtensor<[?],si64> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[ADD]] : !torch.vtensor<[?],si64> to !torch.vtensor<[?],unk> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],unk> -func @type_promotion$same_category_different_width(%arg0: !torch.vtensor<[?],si32>, %arg1: !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],unk> { +func.func @type_promotion$same_category_different_width(%arg0: !torch.vtensor<[?],si32>, %arg1: !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],unk> { %int3 = torch.constant.int 3 %0 = torch.aten.add.Tensor %arg0, %arg1, %int3 : !torch.vtensor<[?],si32>, !torch.vtensor<[?],si64>, !torch.int -> !torch.vtensor<[?],unk> return %0 : !torch.vtensor<[?],unk> } // ----- -// CHECK-LABEL: func @type_promotion$different_category( +// CHECK-LABEL: func.func @type_promotion$different_category( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?],si64>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],unk> { // CHECK: %[[ALPHA:.*]] = torch.constant.int 3 // CHECK: %[[ADD:.*]] = torch.aten.add.Tensor %[[ARG0]], %[[ARG1]], %[[ALPHA]] : !torch.vtensor<[?],si64>, !torch.vtensor<[?],f32>, !torch.int -> !torch.vtensor<[?],f32> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[ADD]] : !torch.vtensor<[?],f32> to !torch.vtensor<[?],unk> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],unk> -func @type_promotion$different_category(%arg0: !torch.vtensor<[?],si64>, %arg1: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],unk> { +func.func @type_promotion$different_category(%arg0: !torch.vtensor<[?],si64>, %arg1: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],unk> { %int3 = torch.constant.int 3 %0 = torch.aten.add.Tensor %arg0, %arg1, %int3 : !torch.vtensor<[?],si64>, !torch.vtensor<[?],f32>, !torch.int -> !torch.vtensor<[?],unk> return %0 : !torch.vtensor<[?],unk> } // ----- -// CHECK-LABEL: func @type_promotion$same_category_zero_rank_wider( +// CHECK-LABEL: func.func @type_promotion$same_category_zero_rank_wider( // CHECK-SAME: %[[ARG0:.*]]: 
!torch.vtensor<[?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[],f64>) -> !torch.vtensor<[?],unk> { // CHECK: %[[ALPHA:.*]] = torch.constant.float 2.300000e+00 // CHECK: %[[ADD:.*]] = torch.aten.add.Tensor %[[ARG0]], %[[ARG1]], %[[ALPHA]] : !torch.vtensor<[?],f32>, !torch.vtensor<[],f64>, !torch.float -> !torch.vtensor<[?],f32> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[ADD]] : !torch.vtensor<[?],f32> to !torch.vtensor<[?],unk> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],unk> -func @type_promotion$same_category_zero_rank_wider(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[],f64>) -> !torch.vtensor<[?],unk> { +func.func @type_promotion$same_category_zero_rank_wider(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[],f64>) -> !torch.vtensor<[?],unk> { %float2.300000e00 = torch.constant.float 2.300000e+00 %0 = torch.aten.add.Tensor %arg0, %arg1, %float2.300000e00 : !torch.vtensor<[?],f32>, !torch.vtensor<[],f64>, !torch.float -> !torch.vtensor<[?],unk> return %0 : !torch.vtensor<[?],unk> } // ----- -// CHECK-LABEL: func @type_promotion$zero_rank_higher_category( +// CHECK-LABEL: func.func @type_promotion$zero_rank_higher_category( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?],si64>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],unk> { // CHECK: %[[ALPHA:.*]] = torch.constant.int 2 // CHECK: %[[ADD:.*]] = torch.aten.add.Tensor %[[ARG0]], %[[ARG1]], %[[ALPHA]] : !torch.vtensor<[?],si64>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[?],f32> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[ADD]] : !torch.vtensor<[?],f32> to !torch.vtensor<[?],unk> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],unk> -func @type_promotion$zero_rank_higher_category(%arg0: !torch.vtensor<[?],si64>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],unk> { +func.func @type_promotion$zero_rank_higher_category(%arg0: !torch.vtensor<[?],si64>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],unk> { %int2 = torch.constant.int 2 %0 = torch.aten.add.Tensor %arg0, %arg1, %int2 : !torch.vtensor<[?],si64>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[?],unk> return %0 : !torch.vtensor<[?],unk> } // ----- -// CHECK-LABEL: func @type_promotion$alpha_wider( +// CHECK-LABEL: func.func @type_promotion$alpha_wider( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],unk> { // CHECK: %[[ALPHA:.*]] = torch.constant.float 2.300000e+00 // CHECK: %[[ADD:.*]] = torch.aten.add.Tensor %[[ARG0]], %[[ARG1]], %[[ALPHA]] : !torch.vtensor<[?],f32>, !torch.vtensor<[],f32>, !torch.float -> !torch.vtensor<[?],f32> // CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[ADD]] : !torch.vtensor<[?],f32> to !torch.vtensor<[?],unk> // CHECK: return %[[RESULT]] : !torch.vtensor<[?],unk> -func @type_promotion$alpha_wider(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],unk> { +func.func @type_promotion$alpha_wider(%arg0: !torch.vtensor<[?],f32>, %arg1: !torch.vtensor<[],f32>) -> !torch.vtensor<[?],unk> { %float2.300000e00 = torch.constant.float 2.300000e+00 %0 = torch.aten.add.Tensor %arg0, %arg1, %float2.300000e00 : !torch.vtensor<[?],f32>, !torch.vtensor<[],f32>, !torch.float -> !torch.vtensor<[?],unk> return %0 : !torch.vtensor<[?],unk> } // ----- -// CHECK-LABEL: func @type_promotion_scalar_operation( +// CHECK-LABEL: func.func @type_promotion_scalar_operation( // CHECK-SAME: %[[FLOAT:.*]]: !torch.float, // CHECK-SAME: 
%[[INT:.*]]: !torch.int) -> !torch.number { // CHECK: %[[ADD:.*]] = torch.aten.add %[[FLOAT]], %[[INT]] : !torch.float, !torch.int -> !torch.float // CHECK: %[[RET:.*]] = torch.derefine %[[ADD]] : !torch.float to !torch.number // CHECK: return %[[RET]] : !torch.number -func @type_promotion_scalar_operation(%float: !torch.float, %int: !torch.int) -> !torch.number { +func.func @type_promotion_scalar_operation(%float: !torch.float, %int: !torch.int) -> !torch.number { %ret = torch.aten.add %float, %int : !torch.float, !torch.int -> !torch.number return %ret : !torch.number } // ----- -// CHECK-LABEL: func @torch.overwrite.tensor.contents$dynamic_overwrites_static( +// CHECK-LABEL: func.func @torch.overwrite.tensor.contents$dynamic_overwrites_static( // CHECK-SAME: %[[STATIC:.*]]: !torch.vtensor<[2],f32>, // CHECK-SAME: %[[DYNAMIC:.*]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[2],f32> { // CHECK: %[[CAST:.*]] = torch.tensor_static_info_cast %[[DYNAMIC_COPY:.*]] : !torch.vtensor<[?],f32> to !torch.vtensor<*,f32> // CHECK: %[[CAST2:.*]] = torch.tensor_static_info_cast %[[CAST:.*]] : !torch.vtensor<*,f32> to !torch.vtensor<*,f32> // CHECK: torch.overwrite.tensor.contents %[[CAST2]] overwrites %[[STATIC_COPY:.*]] : !torch.vtensor<*,f32>, !torch.tensor<*,f32> -func @torch.overwrite.tensor.contents$dynamic_overwrites_static(%static: !torch.vtensor<[2],f32>, %dynamic: !torch.vtensor<[?],f32>) -> !torch.vtensor<[2],f32> { +func.func @torch.overwrite.tensor.contents$dynamic_overwrites_static(%static: !torch.vtensor<[2],f32>, %dynamic: !torch.vtensor<[?],f32>) -> !torch.vtensor<[2],f32> { %static_no_type = torch.tensor_static_info_cast %static : !torch.vtensor<[2],f32> to !torch.vtensor %static_copy = torch.copy.to_tensor %static_no_type : !torch.tensor %dynamic_no_type = torch.tensor_static_info_cast %dynamic : !torch.vtensor<[?],f32> to !torch.vtensor @@ -157,14 +157,14 @@ func @torch.overwrite.tensor.contents$dynamic_overwrites_static(%static: !torch. } // ----- -// CHECK-LABEL: func @torch.overwrite.tensor.contents$static_overwrites_dynamic( +// CHECK-LABEL: func.func @torch.overwrite.tensor.contents$static_overwrites_dynamic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[2],f32>, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],f32> { // CHECK: %[[ARG0_ERASED:.*]] = torch.tensor_static_info_cast %[[ARG0]] : !torch.vtensor<[2],f32> to !torch.vtensor<*,f32> // CHECK: %[[ARG1_ERASED:.*]] = torch.tensor_static_info_cast %[[ARG1]] : !torch.vtensor<[?],f32> to !torch.vtensor<*,f32> // CHECK: %[[MUTABLE_COPY:.*]] = torch.copy.to_tensor %[[ARG1_ERASED]] : !torch.tensor<*,f32> // CHECK: torch.overwrite.tensor.contents %[[ARG0_ERASED]] overwrites %[[MUTABLE_COPY]] : !torch.vtensor<*,f32>, !torch.tensor<*,f32> -func @torch.overwrite.tensor.contents$static_overwrites_dynamic(%static: !torch.vtensor<[2],f32>, %dynamic: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],f32> { +func.func @torch.overwrite.tensor.contents$static_overwrites_dynamic(%static: !torch.vtensor<[2],f32>, %dynamic: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?],f32> { %static_no_type = torch.tensor_static_info_cast %static : !torch.vtensor<[2],f32> to !torch.vtensor %dynamic_no_type = torch.tensor_static_info_cast %dynamic : !torch.vtensor<[?],f32> to !torch.vtensor %dynamic_copy = torch.copy.to_tensor %dynamic_no_type : !torch.tensor @@ -175,23 +175,23 @@ func @torch.overwrite.tensor.contents$static_overwrites_dynamic(%static: !torch. 
} // ----- -// CHECK-LABEL: func @bf16_result_type( +// CHECK-LABEL: func.func @bf16_result_type( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<*,bf16>) -> !torch.vtensor<[2],bf16> { // CHECK: %[[SQRT:.*]] = torch.aten.sqrt %[[ARG0]] : !torch.vtensor<*,bf16> -> !torch.vtensor<[2],bf16> // CHECK: return %[[SQRT]] : !torch.vtensor<[2],bf16> -func @bf16_result_type(%arg0: !torch.vtensor<*,bf16>) -> !torch.vtensor<[2],bf16> { +func.func @bf16_result_type(%arg0: !torch.vtensor<*,bf16>) -> !torch.vtensor<[2],bf16> { %1 = torch.aten.sqrt %arg0 : !torch.vtensor<*,bf16> -> !torch.vtensor<[2], bf16> return %1 : !torch.vtensor<[2],bf16> } // ----- -// CHECK-LABEL: func @propagate_scalar_type( +// CHECK-LABEL: func.func @propagate_scalar_type( // CHECK-SAME: %[[INT:.*]]: !torch.int) -> !torch.number { // CHECK: %[[NUM:.*]] = torch.derefine %[[INT]] : !torch.int to !torch.number // CHECK: %[[ABS:.*]] = torch.prim.abs.Scalar %[[INT]] : !torch.int -> !torch.int // CHECK: %[[RET:.*]] = torch.derefine %[[ABS]] : !torch.int to !torch.number // CHECK: return %[[RET]] : !torch.number -func @propagate_scalar_type(%arg0: !torch.int) -> !torch.number { +func.func @propagate_scalar_type(%arg0: !torch.int) -> !torch.number { %num = torch.derefine %arg0 : !torch.int to !torch.number %1 = torch.prim.abs.Scalar %num: !torch.number -> !torch.number return %1 : !torch.number diff --git a/test/Dialect/Torch/reify-shape-calculations.mlir b/test/Dialect/Torch/reify-shape-calculations.mlir index 925361cd3..a5ed49a53 100644 --- a/test/Dialect/Torch/reify-shape-calculations.mlir +++ b/test/Dialect/Torch/reify-shape-calculations.mlir @@ -1,20 +1,20 @@ // RUN: torch-mlir-opt -torch-reify-shape-calculations -split-input-file %s | FileCheck %s // CHECK: module { -// CHECK: func private @__torch_mlir_shape_fn.aten.tanh( +// CHECK: func.func private @__torch_mlir_shape_fn.aten.tanh( -// CHECK-LABEL: func @basic( +// CHECK-LABEL: func.func @basic( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[RESULT:.*]] = torch.shape.calculate { // CHECK: %[[TANH:.*]] = torch.aten.tanh %[[ARG]] : !torch.vtensor -> !torch.vtensor // CHECK: torch.shape.calculate.yield %[[TANH]] : !torch.vtensor // CHECK: } shapes { // CHECK: %[[SHAPE:.*]] = torch.aten.size %[[ARG]] : !torch.vtensor -> !torch.list -// CHECK: %[[RESULT_SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.tanh(%[[SHAPE]]) : (!torch.list) -> !torch.list +// CHECK: %[[RESULT_SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.tanh(%[[SHAPE]]) : (!torch.list) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[RESULT_SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor -func @basic(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @basic(%arg0: !torch.vtensor) -> !torch.vtensor { %0 = torch.aten.tanh %arg0 : !torch.vtensor -> !torch.vtensor return %0 : !torch.vtensor } @@ -22,9 +22,9 @@ func @basic(%arg0: !torch.vtensor) -> !torch.vtensor { // ----- // CHECK: module { -// CHECK: func private @__torch_mlir_shape_fn.aten.fill.Scalar( +// CHECK: func.func private @__torch_mlir_shape_fn.aten.fill.Scalar( -// CHECK-LABEL: func @valsem_ops( +// CHECK-LABEL: func.func @valsem_ops( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[RESULT:.*]] = torch.shape.calculate { @@ -32,11 +32,11 @@ func @basic(%arg0: !torch.vtensor) -> !torch.vtensor { // CHECK: torch.shape.calculate.yield %[[VALUE]] : !torch.vtensor // CHECK: } shapes { // CHECK: 
%[[SHAPE:.*]] = torch.aten.size %[[ARG0]] : !torch.vtensor -> !torch.list -// CHECK: %[[RESULT_SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.fill.Scalar(%[[SHAPE]], %{{.*}}) : (!torch.list, !torch.float) -> !torch.list +// CHECK: %[[RESULT_SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.fill.Scalar(%[[SHAPE]], %{{.*}}) : (!torch.list, !torch.float) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[RESULT_SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor -func @valsem_ops(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { +func.func @valsem_ops(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { %0 = torch.valsem.aten.fill.Scalar %arg0, %arg1 : !torch.vtensor, !torch.int -> !torch.vtensor return %0 : !torch.vtensor } @@ -44,10 +44,10 @@ func @valsem_ops(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { // ----- // CHECK: module { -// CHECK-LABEL: func private @__torch_mlir_shape_fn.aten.uniform( +// CHECK-LABEL: func.func private @__torch_mlir_shape_fn.aten.uniform( // CHECK-SAME: {{.*}}!torch.any) -// CHECK-LABEL: func @adjust_shape_function_arg$torch.any( +// CHECK-LABEL: func.func @adjust_shape_function_arg$torch.any( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.float) -> !torch.vtensor { // CHECK: %[[NONE:.*]] = torch.constant.none @@ -57,11 +57,11 @@ func @valsem_ops(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { // CHECK: } shapes { // CHECK: %[[ARG0_SHAPE:.*]] = torch.aten.size %[[ARG0]] : !torch.vtensor -> !torch.list // CHECK: %[[ANY:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.any -// CHECK: %[[SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.uniform(%[[ARG0_SHAPE]], %[[ARG1]], %[[ARG1]], %[[ANY]]) : (!torch.list, !torch.float, !torch.float, !torch.any) -> !torch.list +// CHECK: %[[SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.uniform(%[[ARG0_SHAPE]], %[[ARG1]], %[[ARG1]], %[[ANY]]) : (!torch.list, !torch.float, !torch.float, !torch.any) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor -func @adjust_shape_function_arg$torch.any(%arg0: !torch.vtensor, %arg1: !torch.float) -> !torch.vtensor { +func.func @adjust_shape_function_arg$torch.any(%arg0: !torch.vtensor, %arg1: !torch.float) -> !torch.vtensor { %none = torch.constant.none %0 = torch.valsem.aten.uniform %arg0, %arg1, %arg1, %none : !torch.vtensor, !torch.float, !torch.float, !torch.none -> !torch.vtensor return %0 : !torch.vtensor @@ -74,9 +74,9 @@ func @adjust_shape_function_arg$torch.any(%arg0: !torch.vtensor, %arg1: !torch.f // callees of the shape functions. 
// CHECK: module { -// CHECK: func private @__torch_mlir_shape_fn.aten.add.Tensor( +// CHECK: func.func private @__torch_mlir_shape_fn.aten.add.Tensor( -// CHECK-LABEL: func @adjust_shape_function_arg$scalar( +// CHECK-LABEL: func.func @adjust_shape_function_arg$scalar( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[INT1:.*]] = torch.constant.int 1 @@ -87,11 +87,11 @@ func @adjust_shape_function_arg$torch.any(%arg0: !torch.vtensor, %arg1: !torch.f // CHECK: %[[ARG0_SHAPE:.*]] = torch.aten.size %[[ARG0]] : !torch.vtensor -> !torch.list // CHECK: %[[ARG1_SHAPE:.*]] = torch.aten.size %[[ARG1]] : !torch.vtensor -> !torch.list // CHECK: %[[SCALAR_CONVERTED:.*]] = torch.aten.Float.Scalar %[[INT1]] : !torch.int -> !torch.float -// CHECK: %[[RESULT_SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.add.Tensor(%[[ARG0_SHAPE]], %[[ARG1_SHAPE]], %[[SCALAR_CONVERTED]]) : (!torch.list, !torch.list, !torch.float) -> !torch.list +// CHECK: %[[RESULT_SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.add.Tensor(%[[ARG0_SHAPE]], %[[ARG1_SHAPE]], %[[SCALAR_CONVERTED]]) : (!torch.list, !torch.list, !torch.float) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[RESULT_SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor -func @adjust_shape_function_arg$scalar(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { +func.func @adjust_shape_function_arg$scalar(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { %int1 = torch.constant.int 1 %0 = torch.aten.add.Tensor %arg0, %arg1, %int1 : !torch.vtensor, !torch.vtensor, !torch.int -> !torch.vtensor return %0 : !torch.vtensor @@ -100,9 +100,9 @@ func @adjust_shape_function_arg$scalar(%arg0: !torch.vtensor, %arg1: !torch.vten // ----- // CHECK: module { -// CHECK: func private @__torch_mlir_shape_fn.aten.topk( +// CHECK: func.func private @__torch_mlir_shape_fn.aten.topk( -// CHECK-LABEL: func @multiple_results( +// CHECK-LABEL: func.func @multiple_results( // CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> (!torch.tensor, !torch.tensor) { // CHECK: %[[TRUE:.*]] = torch.constant.bool true // CHECK: %[[INT3:.*]] = torch.constant.int 3 @@ -112,13 +112,13 @@ func @adjust_shape_function_arg$scalar(%arg0: !torch.vtensor, %arg1: !torch.vten // CHECK: torch.shape.calculate.yield %[[TOP_VALUES]], %[[TOPK_INDICES]] : !torch.tensor, !torch.tensor // CHECK: } shapes { // CHECK: %[[ARG_SHAPE:.*]] = torch.aten.size %[[ARG]] : !torch.tensor -> !torch.list -// CHECK: %[[TOPK_SHAPE_TUPLE:.*]] = call @__torch_mlir_shape_fn.aten.topk(%[[ARG_SHAPE]], %[[INT3]], %[[INT1]], %[[TRUE]], %[[TRUE]]) : (!torch.list, !torch.int, !torch.int, !torch.bool, !torch.bool) -> !torch.tuple, list> +// CHECK: %[[TOPK_SHAPE_TUPLE:.*]] = func.call @__torch_mlir_shape_fn.aten.topk(%[[ARG_SHAPE]], %[[INT3]], %[[INT1]], %[[TRUE]], %[[TRUE]]) : (!torch.list, !torch.int, !torch.int, !torch.bool, !torch.bool) -> !torch.tuple, list> // CHECK: %[[TOPK_SHAPE:.*]]:2 = torch.prim.TupleUnpack %[[TOPK_SHAPE_TUPLE]] : !torch.tuple, list> -> !torch.list, !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[TOPK_SHAPE]]#0, %[[TOPK_SHAPE]]#1 : !torch.list, !torch.list // CHECK: } : !torch.tensor, !torch.tensor // CHECK: return %[[RESULTS:.*]]#0, %[[RESULTS]]#1 : !torch.tensor, !torch.tensor -func @multiple_results(%arg0: !torch.tensor) -> (!torch.tensor, !torch.tensor) { +func.func @multiple_results(%arg0: !torch.tensor) -> (!torch.tensor, !torch.tensor) { %true = 
torch.constant.bool true %int3 = torch.constant.int 3 %int1 = torch.constant.int 1 @@ -128,7 +128,7 @@ func @multiple_results(%arg0: !torch.tensor) -> (!torch.tensor, !torch.tensor) { // ----- -// CHECK-LABEL: func @adjust_shape_function_arg$optional( +// CHECK-LABEL: func.func @adjust_shape_function_arg$optional( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[RESULT:.*]] = torch.shape.calculate { @@ -138,11 +138,11 @@ func @multiple_results(%arg0: !torch.tensor) -> (!torch.tensor, !torch.tensor) { // CHECK: %[[SHAPE0:.*]] = torch.aten.size %[[ARG0]] : !torch.vtensor -> !torch.list // CHECK: %[[SHAPE1:.*]] = torch.aten.size %[[ARG1]] : !torch.vtensor -> !torch.list // CHECK: %[[DEREFINED:.*]] = torch.derefine %{{.*}} : !torch.none to !torch.optional> -// CHECK: %[[SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.conv2d(%[[SHAPE0]], %[[SHAPE1]], %[[DEREFINED]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list +// CHECK: %[[SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.conv2d(%[[SHAPE0]], %[[SHAPE1]], %[[DEREFINED]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!torch.list, !torch.list, !torch.optional>, !torch.list, !torch.list, !torch.list, !torch.int) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor -func @adjust_shape_function_arg$optional(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { +func.func @adjust_shape_function_arg$optional(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { %int3 = torch.constant.int 3 %int4 = torch.constant.int 4 %int2 = torch.constant.int 2 @@ -157,7 +157,7 @@ func @adjust_shape_function_arg$optional(%arg0: !torch.vtensor, %arg1: !torch.vt // ----- -// CHECK-LABEL: func @adjust_shape_function_arg$optional_tensor( +// CHECK-LABEL: func.func @adjust_shape_function_arg$optional_tensor( // CHECK-SAME: %[[ARG:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[FALSE:.*]] = torch.constant.bool false // CHECK: %[[TRUE:.*]] = torch.constant.bool true @@ -184,11 +184,11 @@ func @adjust_shape_function_arg$optional(%arg0: !torch.vtensor, %arg1: !torch.vt // CHECK: %[[DEREFINED_NONE1:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional> // CHECK: %[[DEREFINED_NONE2:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional> // CHECK: %[[DEREFINED_NONE3:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional> -// CHECK: %[[BN_SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.batch_norm(%[[ARG_SIZE]], %[[DEREFINED_OPTIONAL_SIZE:.*]], %[[DEREFINED_NONE1]], %[[DEREFINED_NONE2]], %[[DEREFINED_NONE3]], %[[FALSE]], %[[C1EM1]], %[[C1EM5]], %[[TRUE]]) : (!torch.list, !torch.optional>, !torch.optional>, !torch.optional>, !torch.optional>, !torch.bool, !torch.float, !torch.float, !torch.bool) -> !torch.list +// CHECK: %[[BN_SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.batch_norm(%[[ARG_SIZE]], %[[DEREFINED_OPTIONAL_SIZE:.*]], %[[DEREFINED_NONE1]], %[[DEREFINED_NONE2]], %[[DEREFINED_NONE3]], %[[FALSE]], %[[C1EM1]], %[[C1EM5]], %[[TRUE]]) : (!torch.list, !torch.optional>, !torch.optional>, !torch.optional>, !torch.optional>, !torch.bool, !torch.float, !torch.float, !torch.bool) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[BN_SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor 
-func @adjust_shape_function_arg$optional_tensor(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @adjust_shape_function_arg$optional_tensor(%arg0: !torch.vtensor) -> !torch.vtensor { %false = torch.constant.bool false %true = torch.constant.bool true %float1.000000e-05 = torch.constant.float 1.000000e-05 @@ -201,7 +201,7 @@ func @adjust_shape_function_arg$optional_tensor(%arg0: !torch.vtensor) -> !torch // ----- -// CHECK-LABEL: func @adjust_shape_function_arg$list( +// CHECK-LABEL: func.func @adjust_shape_function_arg$list( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.vtensor) -> !torch.vtensor { // CHECK: %[[LIST:.*]] = torch.prim.ListConstruct %[[ARG1]] : (!torch.vtensor) -> !torch.list @@ -221,11 +221,11 @@ func @adjust_shape_function_arg$optional_tensor(%arg0: !torch.vtensor) -> !torch // CHECK: %{{.*}} = torch.aten.append.t %[[ADJUSTED_LIST]], %[[ADJUSTED_ELEMENT]] : !torch.list>>, !torch.optional> -> !torch.list>> // CHECK: torch.prim.Loop.condition %[[CTRUE]], iter() // CHECK: } : (!torch.int, !torch.bool) -> () -// CHECK: %[[RESULT_SHAPE:.*]] = call @__torch_mlir_shape_fn.aten.index.Tensor(%[[ARG0_SHAPE]], %[[ADJUSTED_LIST]]) : (!torch.list, !torch.list>>) -> !torch.list +// CHECK: %[[RESULT_SHAPE:.*]] = func.call @__torch_mlir_shape_fn.aten.index.Tensor(%[[ARG0_SHAPE]], %[[ADJUSTED_LIST]]) : (!torch.list, !torch.list>>) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[RESULT_SHAPE]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[VAL_15:.*]] : !torch.vtensor -func @adjust_shape_function_arg$list(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { +func.func @adjust_shape_function_arg$list(%arg0: !torch.vtensor, %arg1: !torch.vtensor) -> !torch.vtensor { %0 = torch.prim.ListConstruct %arg1 : (!torch.vtensor) -> !torch.list %1 = torch.aten.index.Tensor %arg0, %0 : !torch.vtensor, !torch.list -> !torch.vtensor return %1 : !torch.vtensor diff --git a/test/Dialect/Torch/simplify-shape-calculations.mlir b/test/Dialect/Torch/simplify-shape-calculations.mlir index e18330ca8..3c533e5f4 100644 --- a/test/Dialect/Torch/simplify-shape-calculations.mlir +++ b/test/Dialect/Torch/simplify-shape-calculations.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-opt -torch-simplify-shape-calculations -split-input-file %s | FileCheck %s -// CHECK-LABEL: func @refine_shape_calculate_result$basic( +// CHECK-LABEL: func.func @refine_shape_calculate_result$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[INT2:.*]] = torch.constant.int 2 @@ -14,7 +14,7 @@ // CHECK: } : !torch.vtensor<[2,?],unk> // CHECK: %[[RESULT_ERASED:.*]] = torch.tensor_static_info_cast %[[RESULT:.*]] : !torch.vtensor<[2,?],unk> to !torch.vtensor // CHECK: return %[[RESULT_ERASED]] : !torch.vtensor -func @refine_shape_calculate_result$basic(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { +func.func @refine_shape_calculate_result$basic(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { %int2 = torch.constant.int 2 %0 = torch.shape.calculate { torch.shape.calculate.yield %arg0 : !torch.vtensor @@ -25,10 +25,10 @@ func @refine_shape_calculate_result$basic(%arg0: !torch.vtensor, %arg1: !torch.i return %0 : !torch.vtensor } -// CHECK-LABEL: func @refine_shape_calculate_result$clobber_one_element( +// CHECK-LABEL: func.func @refine_shape_calculate_result$clobber_one_element( // CHECK: %[[RESULT_ERASED:.*]] = torch.tensor_static_info_cast %{{.*}} : 
!torch.vtensor<[?,2],unk> to !torch.vtensor // CHECK: return %[[RESULT_ERASED]] : !torch.vtensor -func @refine_shape_calculate_result$clobber_one_element(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.vtensor { +func.func @refine_shape_calculate_result$clobber_one_element(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.vtensor { %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 %0 = torch.shape.calculate { @@ -47,10 +47,10 @@ func @refine_shape_calculate_result$clobber_one_element(%arg0: !torch.vtensor, % return %0 : !torch.vtensor } -// CHECK-LABEL: func @refine_shape_calculate_result$clobber_all_elements( +// CHECK-LABEL: func.func @refine_shape_calculate_result$clobber_all_elements( // CHECK: %[[RESULT_ERASED:.*]] = torch.tensor_static_info_cast %{{.*}} : !torch.vtensor<[?,?],unk> to !torch.vtensor // CHECK: return %[[RESULT_ERASED]] : !torch.vtensor -func @refine_shape_calculate_result$clobber_all_elements(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.vtensor { +func.func @refine_shape_calculate_result$clobber_all_elements(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.bool) -> !torch.vtensor { %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 %0 = torch.shape.calculate { @@ -71,10 +71,10 @@ func @refine_shape_calculate_result$clobber_all_elements(%arg0: !torch.vtensor, } // Make sure that information previously in the IR is not lost. -// CHECK-LABEL: func @refine_shape_calculate_result$meet_with_existing_information( +// CHECK-LABEL: func.func @refine_shape_calculate_result$meet_with_existing_information( // CHECK: %[[RESULT_ERASED:.*]] = torch.tensor_static_info_cast %{{.*}} : !torch.vtensor<[2,3],f32> to !torch.vtensor<[?,3],f32> // CHECK: return %[[RESULT_ERASED]] : !torch.vtensor<[?,3],f32> -func @refine_shape_calculate_result$meet_with_existing_information(%arg0: !torch.vtensor<[?,3],f32>, %arg1: !torch.int) -> !torch.vtensor<[?,3],f32> { +func.func @refine_shape_calculate_result$meet_with_existing_information(%arg0: !torch.vtensor<[?,3],f32>, %arg1: !torch.int) -> !torch.vtensor<[?,3],f32> { %int0 = torch.constant.int 0 %int2 = torch.constant.int 2 %0 = torch.shape.calculate { @@ -87,9 +87,9 @@ func @refine_shape_calculate_result$meet_with_existing_information(%arg0: !torch } // Don't insert static info casts if not needed. 
-// CHECK-LABEL: func @refine_shape_calculate_result$user_allows_type_refinement( +// CHECK-LABEL: func.func @refine_shape_calculate_result$user_allows_type_refinement( // CHECK-NOT: torch.tensor_static_info_cast -func @refine_shape_calculate_result$user_allows_type_refinement(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @refine_shape_calculate_result$user_allows_type_refinement(%arg0: !torch.vtensor) -> !torch.vtensor { %int2 = torch.constant.int 2 %0 = torch.aten.tanh %arg0 : !torch.vtensor -> !torch.vtensor %1 = torch.shape.calculate { @@ -102,7 +102,7 @@ func @refine_shape_calculate_result$user_allows_type_refinement(%arg0: !torch.vt return %2 : !torch.vtensor } -// CHECK-LABEL: func @fully_unroll_prim_loop$unroll( +// CHECK-LABEL: func.func @fully_unroll_prim_loop$unroll( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.list) -> !torch.vtensor { // CHECK: %[[INT1:.*]] = torch.constant.int 1 @@ -117,7 +117,7 @@ func @refine_shape_calculate_result$user_allows_type_refinement(%arg0: !torch.vt // CHECK: torch.shape.calculate.yield.shapes %[[ARG1]] : !torch.list // CHECK: } : !torch.vtensor // CHECK: return %[[RESULT:.*]] : !torch.vtensor -func @fully_unroll_prim_loop$unroll(%arg0: !torch.vtensor, %arg1: !torch.list) -> !torch.vtensor { +func.func @fully_unroll_prim_loop$unroll(%arg0: !torch.vtensor, %arg1: !torch.list) -> !torch.vtensor { %true = torch.constant.bool true %int0 = torch.constant.int 0 %int3 = torch.constant.int 3 @@ -134,9 +134,9 @@ func @fully_unroll_prim_loop$unroll(%arg0: !torch.vtensor, %arg1: !torch.list, %arg2: !torch.int) -> !torch.vtensor { +func.func @fully_unroll_prim_loop$no_unroll(%arg0: !torch.vtensor, %arg1: !torch.list, %arg2: !torch.int) -> !torch.vtensor { %true = torch.constant.bool true %int3 = torch.constant.int 3 %0 = torch.shape.calculate { @@ -152,13 +152,13 @@ func @fully_unroll_prim_loop$no_unroll(%arg0: !torch.vtensor, %arg1: !torch.list return %0 : !torch.vtensor } -// CHECK-LABEL: func @abstractly_interpret_list_ops$basic( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$basic( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.int, // CHECK-SAME: %[[ARG2:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[SHAPE:.*]] = torch.prim.ListConstruct %[[ARG1]], %[[ARG2]] : (!torch.int, !torch.int) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list -func @abstractly_interpret_list_ops$basic(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$basic(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int) -> !torch.vtensor { %0 = torch.shape.calculate { torch.shape.calculate.yield %arg0 : !torch.vtensor } shapes { @@ -171,10 +171,10 @@ func @abstractly_interpret_list_ops$basic(%arg0: !torch.vtensor, %arg1: !torch.i } // Test the different supported mutation ops. 
-// CHECK-LABEL: func @abstractly_interpret_list_ops$mutation_ops( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$mutation_ops( // CHECK: %[[SHAPE:.*]] = torch.prim.ListConstruct %int1, %arg1, %arg2, %arg3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list -func @abstractly_interpret_list_ops$mutation_ops(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$mutation_ops(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int) -> !torch.vtensor { %int0 = torch.constant.int 0 %int1 = torch.constant.int 1 %int2 = torch.constant.int 2 @@ -192,10 +192,10 @@ func @abstractly_interpret_list_ops$mutation_ops(%arg0: !torch.vtensor, %arg1: ! } // Test negative indexes with set_item op. -// CHECK-LABEL: func @abstractly_interpret_list_ops$neg_index_set_item( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$neg_index_set_item( // CHECK: %[[SHAPE:.*]] = torch.prim.ListConstruct %arg1, %arg2 : (!torch.int, !torch.int) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list -func @abstractly_interpret_list_ops$neg_index_set_item(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$neg_index_set_item(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int, %arg3: !torch.int) -> !torch.vtensor { %int1 = torch.constant.int 1 %int-1 = torch.constant.int -1 %int-2 = torch.constant.int -2 @@ -211,10 +211,10 @@ func @abstractly_interpret_list_ops$neg_index_set_item(%arg0: !torch.vtensor, %a } // Test interspersed mutation and evaluation ops. 
-// CHECK-LABEL: func @abstractly_interpret_list_ops$mix_mutation_and_evaluation_ops( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$mix_mutation_and_evaluation_ops( // CHECK: %[[SHAPE:.*]] = torch.prim.ListConstruct %int0, %int1, %int2 : (!torch.int, !torch.int, !torch.int) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list -func @abstractly_interpret_list_ops$mix_mutation_and_evaluation_ops(%arg0: !torch.vtensor) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$mix_mutation_and_evaluation_ops(%arg0: !torch.vtensor) -> !torch.vtensor { %0 = torch.shape.calculate { torch.shape.calculate.yield %arg0 : !torch.vtensor } shapes { @@ -230,10 +230,10 @@ func @abstractly_interpret_list_ops$mix_mutation_and_evaluation_ops(%arg0: !torc return %0 : !torch.vtensor } -// CHECK-LABEL: func @abstractly_interpret_list_ops$use_of_alias$not_yet_handled( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$use_of_alias$not_yet_handled( // CHECK: torch.aten.append.t // CHECK: torch.aten.append.t -func @abstractly_interpret_list_ops$use_of_alias$not_yet_handled(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$use_of_alias$not_yet_handled(%arg0: !torch.vtensor, %arg1: !torch.int, %arg2: !torch.int) -> !torch.vtensor { %0 = torch.shape.calculate { torch.shape.calculate.yield %arg0 : !torch.vtensor } shapes { @@ -247,13 +247,13 @@ func @abstractly_interpret_list_ops$use_of_alias$not_yet_handled(%arg0: !torch.v return %0 : !torch.vtensor } -// CHECK-LABEL: func @abstractly_interpret_list_ops$readonly_op_in_child_region( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$readonly_op_in_child_region( // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor, // CHECK-SAME: %[[VAL_1:.*]]: !torch.int) -> !torch.vtensor { // CHECK: %[[INT3:.*]] = torch.constant.int 3 // CHECK: %[[SHAPE:.*]] = torch.prim.ListConstruct %[[INT3]] : (!torch.int) -> !torch.list // CHECK: torch.shape.calculate.yield.shapes %[[SHAPE]] : !torch.list -func @abstractly_interpret_list_ops$readonly_op_in_child_region(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$readonly_op_in_child_region(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { %true = torch.constant.bool true %int3 = torch.constant.int 3 %int0 = torch.constant.int 0 @@ -276,9 +276,9 @@ func @abstractly_interpret_list_ops$readonly_op_in_child_region(%arg0: !torch.vt } // The mutation in the child region prevents us from abstractly interpreting. 
-// CHECK-LABEL: func @abstractly_interpret_list_ops$mutation_in_child_region( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$mutation_in_child_region( // CHECK: torch.aten.append.t -func @abstractly_interpret_list_ops$mutation_in_child_region(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$mutation_in_child_region(%arg0: !torch.vtensor, %arg1: !torch.int) -> !torch.vtensor { %true = torch.constant.bool true %int3 = torch.constant.int 3 %int0 = torch.constant.int 0 @@ -300,7 +300,7 @@ func @abstractly_interpret_list_ops$mutation_in_child_region(%arg0: !torch.vtens return %0 : !torch.vtensor } -// CHECK-LABEL: func @abstractly_interpret_list_ops$miscompile$list_identity( +// CHECK-LABEL: func.func @abstractly_interpret_list_ops$miscompile$list_identity( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor, // CHECK-SAME: %[[ARG1:.*]]: !torch.list, // CHECK-SAME: %[[ARG2:.*]]: !torch.bool) -> !torch.vtensor { @@ -329,7 +329,7 @@ func @abstractly_interpret_list_ops$mutation_in_child_region(%arg0: !torch.vtens // CHECK: } : !torch.vtensor<[3,3],unk> // CHECK: %[[VAL_13:.*]] = torch.tensor_static_info_cast %[[VAL_14:.*]] : !torch.vtensor<[3,3],unk> to !torch.vtensor // CHECK: return %[[VAL_13]] : !torch.vtensor -func @abstractly_interpret_list_ops$miscompile$list_identity(%arg0: !torch.vtensor, %arg1: !torch.list, %arg2: !torch.bool) -> !torch.vtensor { +func.func @abstractly_interpret_list_ops$miscompile$list_identity(%arg0: !torch.vtensor, %arg1: !torch.list, %arg2: !torch.bool) -> !torch.vtensor { %true = torch.constant.bool true %int3 = torch.constant.int 3 %int0 = torch.constant.int 0 @@ -373,7 +373,7 @@ func @abstractly_interpret_list_ops$miscompile$list_identity(%arg0: !torch.vtens // This test should usually not be the one to catch an issue. // If it does catch an issue then it indicates a more precise unit test that is // missing. -// CHECK-LABEL: func @basic_integration( +// CHECK-LABEL: func.func @basic_integration( // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?],unk>) -> !torch.vtensor { // CHECK: %[[INT0:.*]] = torch.constant.int 0 // CHECK: %[[INT1:.*]] = torch.constant.int 1 @@ -388,7 +388,7 @@ func @abstractly_interpret_list_ops$miscompile$list_identity(%arg0: !torch.vtens // CHECK: } : !torch.vtensor<[?,?],unk> // CHECK: %[[RESULT_ERASED:.*]] = torch.tensor_static_info_cast %[[RESULT:.*]] : !torch.vtensor<[?,?],unk> to !torch.vtensor // CHECK: return %[[RESULT_ERASED]] : !torch.vtensor -func @basic_integration(%arg0: !torch.vtensor<[?,?],unk>) -> !torch.vtensor { +func.func @basic_integration(%arg0: !torch.vtensor<[?,?],unk>) -> !torch.vtensor { %true = torch.constant.bool true %0 = torch.shape.calculate { %1 = torch.aten.tanh %arg0 : !torch.vtensor<[?,?],unk> -> !torch.vtensor diff --git a/test/Dialect/TorchConversion/finalizing-backend-type-conversion.mlir b/test/Dialect/TorchConversion/finalizing-backend-type-conversion.mlir index 2c75d7945..a16da0932 100644 --- a/test/Dialect/TorchConversion/finalizing-backend-type-conversion.mlir +++ b/test/Dialect/TorchConversion/finalizing-backend-type-conversion.mlir @@ -3,10 +3,10 @@ // This test is largely copied from `finalizing-bufferize` upstream, as it // covers the same scope. 
-// CHECK-LABEL: func @eliminate_materializations(
+// CHECK-LABEL: func.func @eliminate_materializations(
 // CHECK-SAME: %[[ARG:.*]]: tensor<f32>) -> tensor<f32> {
 // CHECK: return %[[ARG]] : tensor<f32>
-func @eliminate_materializations(%arg0: tensor<f32>) -> tensor<f32> {
+func.func @eliminate_materializations(%arg0: tensor<f32>) -> tensor<f32> {
   %0 = torch_c.from_builtin_tensor %arg0 : tensor<f32> -> !torch.vtensor<[],f32>
   %1 = torch_c.to_builtin_tensor %0 : !torch.vtensor<[],f32> -> tensor<f32>
   return %1 : tensor<f32>
@@ -15,38 +15,38 @@ func @eliminate_materializations(%arg0: tensor<f32>) -> tensor<f32> {
 // Do a basic check of other types. Under the hood they all take the same
 // code paths as for !torch.vtensor, so we just spot-check them here.
-// CHECK-LABEL: func @eliminate_materializations$torch.bool(
+// CHECK-LABEL: func.func @eliminate_materializations$torch.bool(
 // CHECK-SAME: %[[ARG:.*]]: i1) -> i1 {
 // CHECK: return %[[ARG]] : i1
-func @eliminate_materializations$torch.bool(%arg0: i1) -> i1 {
+func.func @eliminate_materializations$torch.bool(%arg0: i1) -> i1 {
   %0 = torch_c.from_i1 %arg0
   %1 = torch_c.to_i1 %0
   return %1 : i1
 }
-// CHECK-LABEL: func @eliminate_materializations$torch.int(
+// CHECK-LABEL: func.func @eliminate_materializations$torch.int(
 // CHECK-SAME: %[[ARG:.*]]: i64) -> i64 {
 // CHECK: return %[[ARG]] : i64
-func @eliminate_materializations$torch.int(%arg0: i64) -> i64 {
+func.func @eliminate_materializations$torch.int(%arg0: i64) -> i64 {
   %0 = torch_c.from_i64 %arg0
   %1 = torch_c.to_i64 %0
   return %1 : i64
 }
-// CHECK-LABEL: func @eliminate_materializations$torch.float(
+// CHECK-LABEL: func.func @eliminate_materializations$torch.float(
 // CHECK-SAME: %[[ARG:.*]]: f64) -> f64 {
 // CHECK: return %[[ARG]] : f64
-func @eliminate_materializations$torch.float(%arg0: f64) -> f64 {
+func.func @eliminate_materializations$torch.float(%arg0: f64) -> f64 {
   %0 = torch_c.from_f64 %arg0
   %1 = torch_c.to_f64 %0
   return %1 : f64
 }
-// CHECK-LABEL: func @eliminate_materializations$torch.Generator(
+// CHECK-LABEL: func.func @eliminate_materializations$torch.Generator(
 // CHECK-SAME: %[[VAL_0:.*]]: i64) -> i64 {
 // CHECK: return %[[VAL_0]] : i64
 // CHECK: }
-func @eliminate_materializations$torch.Generator(%arg0: i64) -> i64 {
+func.func @eliminate_materializations$torch.Generator(%arg0: i64) -> i64 {
   %0 = torch_c.i64_to_generator %arg0
   %1 = torch_c.generator_to_i64 %0
   return %1 : i64
@@ -54,7 +54,7 @@ func @eliminate_materializations$torch.Generator(%arg0: i64) -> i64 {
 // -----
-func @unable_to_convert_lone_buffer_cast() -> tensor<f32> {
+func.func @unable_to_convert_lone_buffer_cast() -> tensor<f32> {
   // expected-error @+1 {{failed to legalize operation 'test.source'}}
   %0 = "test.source"() : () -> !torch.vtensor<[],f32>
   %1 = torch_c.to_builtin_tensor %0 : !torch.vtensor<[],f32> -> tensor<f32>
@@ -63,7 +63,7 @@
 // -----
-func @unable_to_convert_lone_tensor_load(%arg0: tensor<f32>) {
+func.func @unable_to_convert_lone_tensor_load(%arg0: tensor<f32>) {
   %0 = torch_c.from_builtin_tensor %arg0 : tensor<f32> -> !torch.vtensor<[],f32>
   // expected-error @+1 {{failed to legalize operation 'test.sink'}}
   "test.sink"(%0) : (!torch.vtensor<[],f32>) -> ()
diff --git a/test/Dialect/TorchConversion/func-backend-type-conversion.mlir b/test/Dialect/TorchConversion/func-backend-type-conversion.mlir
index ddf366c7e..0a00d1b91 100644
--- a/test/Dialect/TorchConversion/func-backend-type-conversion.mlir
+++ b/test/Dialect/TorchConversion/func-backend-type-conversion.mlir
@@ -3,48 +3,48 @@
 // This test is largely copied from
`func-bufferize` upstream, as it covers // the same scope. -// CHECK-LABEL: func @identity( +// CHECK-LABEL: func.func @identity( // CHECK-SAME: %[[ARG:.*]]: tensor) -> tensor { // CHECK: return %[[ARG]] : tensor -func @identity(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { +func.func @identity(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { return %arg0 : !torch.vtensor<[],f32> } -// CHECK-LABEL: func @block_arguments( +// CHECK-LABEL: func.func @block_arguments( // CHECK-SAME: %[[ARG:.*]]: tensor) -> tensor { // CHECK: cf.br ^bb1(%[[ARG]] : tensor) // CHECK: ^bb1(%[[BBARG:.*]]: tensor): // CHECK: return %[[BBARG]] : tensor -func @block_arguments(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { +func.func @block_arguments(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { cf.br ^bb1(%arg0: !torch.vtensor<[],f32>) ^bb1(%bbarg: !torch.vtensor<[],f32>): return %bbarg : !torch.vtensor<[],f32> } -// CHECK-LABEL: func private @source() -> tensor -// CHECK-LABEL: func @call_source() -> tensor { +// CHECK-LABEL: func.func private @source() -> tensor +// CHECK-LABEL: func.func @call_source() -> tensor { // CHECK: %[[RET:.*]] = call @source() : () -> tensor // CHECK: return %[[RET]] : tensor -func private @source() -> !torch.vtensor<[],f32> -func @call_source() -> !torch.vtensor<[],f32> { +func.func private @source() -> !torch.vtensor<[],f32> +func.func @call_source() -> !torch.vtensor<[],f32> { %0 = call @source() : () -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } -// CHECK-LABEL: func @call_sink( +// CHECK-LABEL: func.func @call_sink( // CHECK-SAME: %[[ARG:.*]]: tensor) { // CHECK: call @sink(%[[ARG]]) : (tensor) -> () // CHECK: return -func private @sink(!torch.vtensor<[],f32>) -func @call_sink(%arg0: !torch.vtensor<[],f32>) { +func.func private @sink(!torch.vtensor<[],f32>) +func.func @call_sink(%arg0: !torch.vtensor<[],f32>) { call @sink(%arg0) : (!torch.vtensor<[],f32>) -> () return } -// CHECK-LABEL: func @unconverted_op_in_body() -> tensor { +// CHECK-LABEL: func.func @unconverted_op_in_body() -> tensor { // CHECK: %[[TENSOR:.*]] = "test.source"() : () -> !torch.vtensor<[],f32> // CHECK: %[[BUILTIN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[TENSOR]] : !torch.vtensor<[],f32> -> tensor // CHECK: return %[[BUILTIN_TENSOR]] : tensor -func @unconverted_op_in_body() -> !torch.vtensor<[],f32> { +func.func @unconverted_op_in_body() -> !torch.vtensor<[],f32> { %0 = "test.source"() : () -> !torch.vtensor<[],f32> return %0 : !torch.vtensor<[],f32> } @@ -53,7 +53,7 @@ func @unconverted_op_in_body() -> !torch.vtensor<[],f32> { // Because this pass updates block arguments, it needs to also atomically // update all terminators and issue an error if that is not possible. 
-func @unable_to_update_terminator(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { +func.func @unable_to_update_terminator(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> { %0 = arith.constant true cf.cond_br %0, ^bb1(%arg0: !torch.vtensor<[],f32>), ^bb2(%arg0: !torch.vtensor<[],f32>) ^bb1(%bbarg0: !torch.vtensor<[],f32>): @@ -72,7 +72,7 @@ func @unable_to_update_terminator(%arg0: !torch.vtensor<[],f32>) -> !torch.vtens // CHECK: while // CHECK: scf.while // CHECK: scf.condition -func @bwhile(%arg0: i64, %arg1: i64) -> i64 { +func.func @bwhile(%arg0: i64, %arg1: i64) -> i64 { %c2_i64 = arith.constant 2 : i64 %0:2 = scf.while (%arg2 = %arg0) : (i64) -> (i64, i64) { %1 = arith.cmpi slt, %arg2, %arg1 : i64 @@ -88,30 +88,30 @@ func @bwhile(%arg0: i64, %arg1: i64) -> i64 { // Do a basic check of other types. Under the hood they all take the same // code paths as for !torch.vtensor, so we just spot-check them here. -// CHECK-LABEL: func @identity$torch.bool( +// CHECK-LABEL: func.func @identity$torch.bool( // CHECK-SAME: %[[ARG:.*]]: i1) -> i1 { // CHECK: return %[[ARG]] : i1 -func @identity$torch.bool(%arg0: !torch.bool) -> !torch.bool { +func.func @identity$torch.bool(%arg0: !torch.bool) -> !torch.bool { return %arg0 : !torch.bool } -// CHECK-LABEL: func @identity$torch.int( +// CHECK-LABEL: func.func @identity$torch.int( // CHECK-SAME: %[[ARG:.*]]: i64) -> i64 { // CHECK: return %[[ARG]] : i64 -func @identity$torch.int(%arg0: !torch.int) -> !torch.int { +func.func @identity$torch.int(%arg0: !torch.int) -> !torch.int { return %arg0 : !torch.int } -// CHECK-LABEL: func @identity$torch.float( +// CHECK-LABEL: func.func @identity$torch.float( // CHECK-SAME: %[[ARG:.*]]: f64) -> f64 { // CHECK: return %[[ARG]] : f64 -func @identity$torch.float(%arg0: !torch.float) -> !torch.float { +func.func @identity$torch.float(%arg0: !torch.float) -> !torch.float { return %arg0 : !torch.float } -// CHECK-LABEL: func @identity$torch.Generator( +// CHECK-LABEL: func.func @identity$torch.Generator( // CHECK-SAME: %[[VAL_0:.*]]: i64) -> i64 { // CHECK: return %[[VAL_0]] : i64 -func @identity$torch.Generator(%arg0: !torch.Generator) -> !torch.Generator { +func.func @identity$torch.Generator(%arg0: !torch.Generator) -> !torch.Generator { return %arg0 : !torch.Generator } diff --git a/test/Dialect/TorchConversion/ops.mlir b/test/Dialect/TorchConversion/ops.mlir index 7a03c2905..e10aede04 100644 --- a/test/Dialect/TorchConversion/ops.mlir +++ b/test/Dialect/TorchConversion/ops.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-opt %s | torch-mlir-opt | FileCheck %s -// CHECK-LABEL: func @builtin_tensor_interop( -func @builtin_tensor_interop(%arg0: tensor<*xf32>, %arg1: tensor<3x?xi8>, %arg2: !torch.vtensor<*,f32>, %arg3: !torch.vtensor<[3,?],si8>) { +// CHECK-LABEL: func.func @builtin_tensor_interop( +func.func @builtin_tensor_interop(%arg0: tensor<*xf32>, %arg1: tensor<3x?xi8>, %arg2: !torch.vtensor<*,f32>, %arg3: !torch.vtensor<[3,?],si8>) { // CHECK: torch_c.from_builtin_tensor %arg0 : tensor<*xf32> -> !torch.vtensor<*,f32> %0 = torch_c.from_builtin_tensor %arg0 : tensor<*xf32> -> !torch.vtensor<*,f32> // CHECK: torch_c.from_builtin_tensor %arg1 : tensor<3x?xi8> -> !torch.vtensor<[3,?],si8> diff --git a/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir b/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir index 3b9a8a244..1c3e466a2 100644 --- a/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir +++ 
b/test/Dialect/TorchConversion/verify-invariants-before-backend-lowering.mlir @@ -2,7 +2,7 @@ // ----- -func @unknown_rank(%arg0: !torch.vtensor<[],f32>) { +func.func @unknown_rank(%arg0: !torch.vtensor<[],f32>) { // expected-error@+2 {{unsupported by backend lowering: tensor with unknown rank or dtype}} // expected-note@+1 {{this is likely due to a missing shape transfer function in shape_lib_gen.py}} %0 = torch.aten.mul.Tensor %arg0, %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<*,f32> @@ -11,7 +11,7 @@ func @unknown_rank(%arg0: !torch.vtensor<[],f32>) { // ----- -func @unknown_dtype(%arg0: !torch.vtensor<[],f32>) { +func.func @unknown_dtype(%arg0: !torch.vtensor<[],f32>) { // expected-error@+2 {{unsupported by backend lowering: tensor with unknown rank or dtype}} // expected-note@+1 {{this is likely due to a missing shape transfer function in shape_lib_gen.py}} %0 = torch.aten.mul.Tensor %arg0, %arg0 : !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[],unk> @@ -20,7 +20,7 @@ func @unknown_dtype(%arg0: !torch.vtensor<[],f32>) { // ----- -func @unresolved_operator(%arg0: !torch.vtensor<[],f32>, %arg1: !torch.int) { +func.func @unresolved_operator(%arg0: !torch.vtensor<[],f32>, %arg1: !torch.int) { // expected-error@+2 {{unsupported by backend lowering: `torch.operator` op}} // expected-note@+1 {{this is likely due to a missing op that needs to be generated by torch_ods_gen.py}} torch.operator "aten.mul.Scalar"(%arg0, %arg1) : (!torch.vtensor<[],f32>, !torch.int) -> !torch.vtensor<[],f32> diff --git a/test/Dialect/TorchConversion/verify-linalg-on-tensors-backend-contract.mlir b/test/Dialect/TorchConversion/verify-linalg-on-tensors-backend-contract.mlir index 8dfddb11d..09ad1f3b4 100644 --- a/test/Dialect/TorchConversion/verify-linalg-on-tensors-backend-contract.mlir +++ b/test/Dialect/TorchConversion/verify-linalg-on-tensors-backend-contract.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-opt -torch-verify-linalg-on-tensors-backend-contract -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s -// CHECK: func @mm -func @mm(%arg0: tensor, %arg1: tensor) -> tensor { +// CHECK: func.func @mm +func.func @mm(%arg0: tensor, %arg1: tensor) -> tensor { %c0 = arith.constant 0 : index %c1 = arith.constant 1 : index %cst = arith.constant 0.000000e+00 : f32 @@ -23,7 +23,7 @@ func @mm(%arg0: tensor, %arg1: tensor) -> tensor { // expected-error@+1 {{Module does not conform to the linalg-on-tensors backend contract.}} module { - func @disallowed() { + func.func @disallowed() { // expected-error@+1 {{failed to legalize operation 'unknown_dialect.unknown_op'}} "unknown_dialect.unknown_op"() : () -> () return @@ -46,7 +46,7 @@ module { // expected-error@+1 {{Module does not conform to the linalg-on-tensors backend contract.}} module { - func @disallowed(%arg0: !torch.tensor) -> !torch.tensor { + func.func @disallowed(%arg0: !torch.tensor) -> !torch.tensor { // expected-error@+1 {{failed to legalize operation 'func.return'}} return %arg0 : !torch.tensor } diff --git a/test/Dialect/TorchConversion/verify-tosa-backend-contract.mlir b/test/Dialect/TorchConversion/verify-tosa-backend-contract.mlir index 56d7394f5..2a55a3231 100644 --- a/test/Dialect/TorchConversion/verify-tosa-backend-contract.mlir +++ b/test/Dialect/TorchConversion/verify-tosa-backend-contract.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-opt -torch-verify-tosa-backend-contract -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s -// CHECK: func @tanh 
-func @tanh(%arg0: tensor) -> tensor { +// CHECK: func.func @tanh +func.func @tanh(%arg0: tensor) -> tensor { %0 = "tosa.tanh"(%arg0) : (tensor) -> tensor return %0 : tensor } @@ -12,7 +12,7 @@ func @tanh(%arg0: tensor) -> tensor { // expected-error@+1 {{Module does not conform to the TOSA backend contract.}} module { - func @disallowed() { + func.func @disallowed() { // expected-error@+1 {{failed to legalize operation 'unknown_dialect.unknown_op'}} "unknown_dialect.unknown_op"() : () -> () return @@ -35,7 +35,7 @@ module { // expected-error@+1 {{Module does not conform to the TOSA backend contract.}} module { - func @disallowed(%arg0: !torch.tensor) -> !torch.tensor { + func.func @disallowed(%arg0: !torch.tensor) -> !torch.tensor { // expected-error@+1 {{failed to legalize operation 'func.return'}} return %arg0 : !torch.tensor } diff --git a/test/RefBackend/insert-rng-globals.mlir b/test/RefBackend/insert-rng-globals.mlir index c1d1e104a..c44d3397a 100644 --- a/test/RefBackend/insert-rng-globals.mlir +++ b/test/RefBackend/insert-rng-globals.mlir @@ -1,7 +1,7 @@ // RUN: torch-mlir-opt %s -refback-insert-rng-globals -split-input-file | FileCheck %s // CHECK-LABEL: memref.global "private" @global_seed : memref = dense<0> -// CHECK-LABEL: func @f() -> i64 { +// CHECK-LABEL: func.func @f() -> i64 { // CHECK: %[[MEMREF:.*]] = memref.get_global @global_seed : memref // CHECK: %[[SEED:.*]] = memref.load %[[MEMREF]][] : memref // CHECK: %[[MULTIPLIER:.*]] = arith.constant 6364136223846793005 : i64 @@ -13,7 +13,7 @@ // CHECK: memref.store %[[NEXT_SEED]], %[[MEMREF]][] : memref // CHECK: return %[[NEXT_SEED]] : i64 module { - func @f() -> i64 { + func.func @f() -> i64 { %seed = torch_c.get_next_seed : () -> i64 return %seed : i64 } diff --git a/test/RefBackend/munge-calling-conventions.mlir b/test/RefBackend/munge-calling-conventions.mlir index 5a6aab3db..c55e43b3b 100644 --- a/test/RefBackend/munge-calling-conventions.mlir +++ b/test/RefBackend/munge-calling-conventions.mlir @@ -1,43 +1,43 @@ // RUN: torch-mlir-opt %s -refback-munge-calling-conventions -split-input-file | FileCheck %s -// CHECK-LABEL: func @f( +// CHECK-LABEL: func.func @f( // CHECK-SAME: %[[ARG0:.*]]: memref<*xf32>) attributes {llvm.emit_c_interface} { // CHECK: %[[VAL:.*]] = memref.cast %[[ARG0]] : memref<*xf32> to memref // CHECK: %[[RESULT:.*]] = memref.cast %[[VAL]] : memref to memref<*xf32> // CHECK: call @refbackend_consume_func_return_mrf32(%[[RESULT]]) : (memref<*xf32>) -> () // CHECK: return -func @f(%arg0: memref) -> memref { +func.func @f(%arg0: memref) -> memref { return %arg0 : memref } // ----- -// CHECK-LABEL: func @i( +// CHECK-LABEL: func.func @i( // CHECK-SAME: %[[ARG0:.*]]: memref<*xi64>) attributes {llvm.emit_c_interface} { // CHECK: %[[VAL:.*]] = memref.cast %[[ARG0]] : memref<*xi64> to memref // CHECK: %[[RESULT:.*]] = memref.cast %[[VAL]] : memref to memref<*xi64> // CHECK: call @refbackend_consume_func_return_mri64(%[[RESULT]]) : (memref<*xi64>) -> () // CHECK: return -func @i(%arg0: memref) -> memref { +func.func @i(%arg0: memref) -> memref { return %arg0 : memref } // ----- -// CHECK-LABEL: func @elemental_type( +// CHECK-LABEL: func.func @elemental_type( // CHECK-SAME: %[[ARG0:.*]]: memref<*xi64>) attributes {llvm.emit_c_interface} { // CHECK: %[[VAL:.*]] = memref.cast %[[ARG0]] : memref<*xi64> to memref // CHECK: %[[RESULT:.*]] = memref.load %[[VAL]][] : memref // CHECK: call @refbackend_consume_func_return_i64(%[[RESULT]]) : (i64) -> () // CHECK: return -func @elemental_type(%arg0: memref) -> i64 { 
+func.func @elemental_type(%arg0: memref) -> i64 { %0 = memref.load %arg0[] : memref return %0 : i64 } // ----- -// CHECK-LABEL: func @multiple_return_values( +// CHECK-LABEL: func.func @multiple_return_values( // CHECK-SAME: %[[ARG0:.*]]: memref<*xf32>, %[[ARG1:.*]]: memref<*xf32>, // CHECK-SAME: %[[ARG2:.*]]: memref<*xf32>) attributes {llvm.emit_c_interface} { // CHECK: %[[VAL0:.*]] = memref.cast %[[ARG0]] : memref<*xf32> to memref @@ -50,13 +50,13 @@ func @elemental_type(%arg0: memref) -> i64 { // CHECK-SAME: : (memref<*xf32>, memref<*xf32>, memref<*xf32>) -> () // CHECK: return -func @multiple_return_values(%arg0: memref, %arg1: memref, %arg2: memref) -> (memref, memref, memref) { +func.func @multiple_return_values(%arg0: memref, %arg1: memref, %arg2: memref) -> (memref, memref, memref) { return %arg0 ,%arg1, %arg2 : memref, memref, memref } // ----- -// CHECK-LABEL: func @two_return_values( +// CHECK-LABEL: func.func @two_return_values( // CHECK-SAME: %[[ARG0:.*]]: memref<*xf32>, %[[ARG1:.*]]: memref<*xi64>) // CHECK-SAME: attributes {llvm.emit_c_interface} { // CHECK: %[[VAL0:.*]] = memref.cast %[[ARG0]] : memref<*xf32> to memref @@ -67,6 +67,6 @@ func @multiple_return_values(%arg0: memref, %arg1: memref, %arg2: // CHECK-SAME: : (memref<*xf32>, memref<*xi64>) -> () // CHECK: return -func @two_return_values(%arg0: memref, %arg1: memref) -> (memref, memref) { +func.func @two_return_values(%arg0: memref, %arg1: memref) -> (memref, memref) { return %arg0 ,%arg1 : memref, memref } diff --git a/test/python/importer/jit_ir/ivalue_import/annotations/arg-tensor-type-bound.py b/test/python/importer/jit_ir/ivalue_import/annotations/arg-tensor-type-bound.py index c638f475c..e8bcd4864 100644 --- a/test/python/importer/jit_ir/ivalue_import/annotations/arg-tensor-type-bound.py +++ b/test/python/importer/jit_ir/ivalue_import/annotations/arg-tensor-type-bound.py @@ -21,7 +21,7 @@ recursivescriptmodule = torch.jit.script(test_module) annotator = ClassAnnotator() class_type = recursivescriptmodule._c._type() -# CHECK: func private @__torch__.TestModule.forward( +# CHECK: func.func private @__torch__.TestModule.forward( # CHECK-SAME: %arg0: !torch.nn.Module<"__torch__.TestModule">, # CHECK-SAME: %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[?,1024],si8>}, # CHECK-SAME: %arg2: !torch.tensor {torch.type_bound = !torch.vtensor<[],f32>} diff --git a/test/python/importer/jit_ir/ivalue_import/functions-that-call-methods.py b/test/python/importer/jit_ir/ivalue_import/functions-that-call-methods.py index db58c51a4..ade43aca0 100644 --- a/test/python/importer/jit_ir/ivalue_import/functions-that-call-methods.py +++ b/test/python/importer/jit_ir/ivalue_import/functions-that-call-methods.py @@ -13,18 +13,18 @@ mb = ModuleBuilder() # Interesting test case, where a function calls a method. 
-# CHECK-LABEL: func private @__torch__.TestModule.forward +# CHECK-LABEL: func.func private @__torch__.TestModule.forward # CHECK-SAME: (%[[ARG0:.*]]: !torch.nn.Module<"__torch__.TestModule">, %[[ARG1:.*]]: !torch.tensor) -> !torch.none { # CHECK: %[[F:.*]] = constant @__torch__.calls_method : (!torch.nn.Module<"__torch__.TestModule">, !torch.tensor) -> !torch.none # CHECK: %[[RET:.*]] = call_indirect %[[F]](%[[ARG0]], %[[ARG1]]) : (!torch.nn.Module<"__torch__.TestModule">, !torch.tensor) -> !torch.none # CHECK: return %[[RET]] : !torch.none # CHECK: } -# CHECK-LABEL: func private @__torch__.TestModule.method +# CHECK-LABEL: func.func private @__torch__.TestModule.method # CHECK-SAME: (%[[ARG0:.*]]: !torch.nn.Module<"__torch__.TestModule">, %[[ARG1:.*]]: !torch.tensor) -> !torch.none { # CHECK: %[[RET:.*]] = torch.constant.none # CHECK: return %[[RET]] : !torch.none # CHECK: } -# CHECK-LABEL: func private @__torch__.calls_method +# CHECK-LABEL: func.func private @__torch__.calls_method # CHECK-SAME: (%[[ARG0:.*]]: !torch.nn.Module<"__torch__.TestModule">, %[[ARG1:.*]]: !torch.tensor) -> !torch.none { # CHECK: %[[RET:.*]] = torch.prim.CallMethod %[[ARG0]]["method"] (%[[ARG1]]) : !torch.nn.Module<"__torch__.TestModule">, (!torch.tensor) -> !torch.none # CHECK: return %[[RET]] : !torch.none diff --git a/test/python/importer/jit_ir/ivalue_import/functions.py b/test/python/importer/jit_ir/ivalue_import/functions.py index 13536f58f..484260617 100644 --- a/test/python/importer/jit_ir/ivalue_import/functions.py +++ b/test/python/importer/jit_ir/ivalue_import/functions.py @@ -11,13 +11,13 @@ from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder mb = ModuleBuilder() -# CHECK-LABEL: func private @__torch__.TestModule.forward +# CHECK-LABEL: func.func private @__torch__.TestModule.forward # CHECK-SAME: (%[[ARG0:.*]]: !torch.nn.Module<"__torch__.TestModule">, %[[ARG1:.*]]: !torch.tensor) -> !torch.tensor { # CHECK: %[[VAL_2:.*]] = constant @__torch__.identity : (!torch.tensor) -> !torch.tensor # CHECK: %[[VAL_3:.*]] = call_indirect %[[VAL_2]](%[[ARG1]]) : (!torch.tensor) -> !torch.tensor # CHECK: return %[[VAL_3]] : !torch.tensor # CHECK: } -# CHECK-LABEL: func private @__torch__.identity +# CHECK-LABEL: func.func private @__torch__.identity # CHECK-SAME: (%[[ARG:.*]]: !torch.tensor) -> !torch.tensor { # CHECK: return %[[ARG]] : !torch.tensor # CHECK: } diff --git a/test/python/importer/jit_ir/ivalue_import/methods-derefine.py b/test/python/importer/jit_ir/ivalue_import/methods-derefine.py index fd156a41c..6a941330d 100644 --- a/test/python/importer/jit_ir/ivalue_import/methods-derefine.py +++ b/test/python/importer/jit_ir/ivalue_import/methods-derefine.py @@ -17,7 +17,7 @@ class TestModule(torch.nn.Module): def __init__(self): super().__init__() - # CHECK-LABEL: func private @__torch__.TestModule.forward( + # CHECK-LABEL: func.func private @__torch__.TestModule.forward( # CHECK-SAME: %[[SELF:.*]]: !torch.nn.Module<"__torch__.TestModule">) -> !torch.optional { # CHECK: %[[NONE:.*]] = torch.constant.none # CHECK: %[[DEREFINED:.*]] = torch.derefine %[[NONE]] : !torch.none to !torch.optional diff --git a/test/python/importer/jit_ir/ivalue_import/methods.py b/test/python/importer/jit_ir/ivalue_import/methods.py index 66d806683..fc246c458 100644 --- a/test/python/importer/jit_ir/ivalue_import/methods.py +++ b/test/python/importer/jit_ir/ivalue_import/methods.py @@ -21,7 +21,7 @@ mb = ModuleBuilder() # Given how systematic this is, we don't treat the symbol names as opaque (i.e. 
# we don't need to capture their names when FileCheck testing). -# CHECK-LABEL: func private @__torch__.TestModule.forward +# CHECK-LABEL: func.func private @__torch__.TestModule.forward # CHECK-SAME: (%[[SELF:.*]]: !torch.nn.Module<"__torch__.TestModule">, %[[X:.*]]: !torch.tensor) -> !torch.tensor { # CHECK: return %[[X]] : !torch.tensor # CHECK: } diff --git a/test/python/importer/jit_ir/ivalue_import/prim.py b/test/python/importer/jit_ir/ivalue_import/prim.py index 097cece0b..55fed3299 100644 --- a/test/python/importer/jit_ir/ivalue_import/prim.py +++ b/test/python/importer/jit_ir/ivalue_import/prim.py @@ -17,7 +17,7 @@ class TestModule(torch.nn.Module): self.t1 = torch.ones(1) self.t2 = torch.ones(1) - # CHECK-LABEL: func private @__torch__.TestModule.forward( + # CHECK-LABEL: func.func private @__torch__.TestModule.forward( # CHECK-SAME: %[[SELF:.*]]: !torch.nn.Module<"{{.*}}">) -> !torch.none { def forward(self): # CHECK: %[[T2:.*]] = torch.prim.GetAttr %[[SELF]]["t2"] @@ -25,7 +25,7 @@ class TestModule(torch.nn.Module): self.t1 = self.t2 # CHECK: torch.prim.CallMethod %[[SELF]]["callee"] (%{{.*}}, %{{.*}}) self.callee(self.t1, self.t2) - # CHECK-LABEL: func private @__torch__.TestModule.callee( + # CHECK-LABEL: func.func private @__torch__.TestModule.callee( # CHECK-SAME: %[[SELF:.*]]: !torch.nn.Module<"{{.*}}">, # CHECK-SAME: %[[X:.*]]: !torch.tensor, # CHECK-SAME: %[[Y:.*]]: !torch.tensor diff --git a/test/python/importer/jit_ir/ivalue_import/submodules-select.py b/test/python/importer/jit_ir/ivalue_import/submodules-select.py index 099c16364..b0834691e 100644 --- a/test/python/importer/jit_ir/ivalue_import/submodules-select.py +++ b/test/python/importer/jit_ir/ivalue_import/submodules-select.py @@ -24,7 +24,7 @@ class TestModule(torch.nn.Module): self.s1 = Submodule(1) self.s2 = Submodule(2) - # CHECK-LABEL: func private @{{.*}}TestModule.forward + # CHECK-LABEL: func.func private @{{.*}}TestModule.forward def forward(self, b: bool): # Modules with the same class can be selected between. # CHECK: %[[MOD:.*]] = torch.prim.If diff --git a/test/python/importer/jit_ir/node_import/classes.py b/test/python/importer/jit_ir/node_import/classes.py index 65ac8fb6e..511aac690 100644 --- a/test/python/importer/jit_ir/node_import/classes.py +++ b/test/python/importer/jit_ir/node_import/classes.py @@ -18,7 +18,7 @@ class BasicClass: def __init__(self, x: int): self.x = x -# CHECK-LABEL: func @__torch__.prim_CreateObject( +# CHECK-LABEL: func.func @__torch__.prim_CreateObject( # CHECK-SAME: %[[ARG0:.*]]: !torch.int) -> !torch.nn.Module<"__torch__.BasicClass"> { # CHECK: %[[OBJECT:.*]] = torch.prim.CreateObject !torch.nn.Module<"__torch__.BasicClass"> # CHECK: %[[NONE:.*]] = torch.prim.CallMethod %[[OBJECT]]["__init__"] (%[[ARG0]]) : !torch.nn.Module<"__torch__.BasicClass">, (!torch.int) -> !torch.none diff --git a/test/python/importer/jit_ir/node_import/debug-info.py b/test/python/importer/jit_ir/node_import/debug-info.py index d4493827e..b6543ed61 100644 --- a/test/python/importer/jit_ir/node_import/debug-info.py +++ b/test/python/importer/jit_ir/node_import/debug-info.py @@ -9,7 +9,7 @@ from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.add3 +# CHECK-LABEL: func.func @__torch__.add3 # Note that line-level debug information for parts unannotated in the Torch # graph are ascribed to the first op that carries source information. Presently # this includes naked constants, return and the function itself. 
This heuristic diff --git a/test/python/importer/jit_ir/node_import/dict.py b/test/python/importer/jit_ir/node_import/dict.py index b2bbe38cd..ed4371bb0 100644 --- a/test/python/importer/jit_ir/node_import/dict.py +++ b/test/python/importer/jit_ir/node_import/dict.py @@ -12,7 +12,7 @@ from typing import Tuple, Optional, List, NamedTuple, Dict mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.dict_literal_empty() -> !torch.dict { +# CHECK-LABEL: func.func @__torch__.dict_literal_empty() -> !torch.dict { # CHECK: %[[DICT:.*]] = torch.prim.DictConstruct keys() values() -> !torch.dict # CHECK: return %[[DICT]] : !torch.dict @mb.import_function @@ -21,7 +21,7 @@ def dict_literal_empty() -> Dict[str, torch.Tensor]: return {} -# CHECK-LABEL: func @__torch__.dict_literal( +# CHECK-LABEL: func.func @__torch__.dict_literal( # CHECK-SAME: %[[K0:.*]]: !torch.str, %[[V0:.*]]: !torch.tensor, # CHECK-SAME: %[[K1:.*]]: !torch.str, %[[V1:.*]]: !torch.tensor) # CHECK-SAME: -> !torch.dict> { diff --git a/test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py b/test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py index f70ad3db5..e245ec870 100644 --- a/test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py +++ b/test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py @@ -10,7 +10,7 @@ from utils import create_script_function mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.refined_block_arg( +# CHECK-LABEL: func.func @__torch__.refined_block_arg( # CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.tensor { # CHECK: %[[REFINED:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.tensor to !torch.tensor<[1,384],f32> # CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[REFINED]] : !torch.tensor<[1,384],f32> to !torch.tensor diff --git a/test/python/importer/jit_ir/node_import/function-derefine.py b/test/python/importer/jit_ir/node_import/function-derefine.py index 19de8ceb0..94eed3cef 100644 --- a/test/python/importer/jit_ir/node_import/function-derefine.py +++ b/test/python/importer/jit_ir/node_import/function-derefine.py @@ -11,7 +11,7 @@ import typing mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.optional_return( +# CHECK-LABEL: func.func @__torch__.optional_return( # CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.optional { # CHECK: %[[RET:.*]] = torch.derefine %[[ARG]] : !torch.int to !torch.optional # CHECK: return %[[RET]] : !torch.optional @@ -20,14 +20,14 @@ mb = ModuleBuilder() def optional_return(i: int) -> typing.Optional[int]: return i -# CHECK-LABEL: func @__torch__.optional_arg( +# CHECK-LABEL: func.func @__torch__.optional_arg( # CHECK-SAME: %[[ARG:.*]]: !torch.optional) -> !torch.none { @mb.import_function @torch.jit.script def optional_arg(i: typing.Optional[int]) -> None: return -# CHECK-LABEL: func @__torch__.calls_optional_arg( +# CHECK-LABEL: func.func @__torch__.calls_optional_arg( # CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.none { # CHECK: %[[CALLEE:.*]] = constant @__torch__.optional_arg : (!torch.optional) -> !torch.none # CHECK: %[[DEREFINED:.*]] = torch.derefine %[[ARG]] : !torch.int to !torch.optional diff --git a/test/python/importer/jit_ir/node_import/if.py b/test/python/importer/jit_ir/node_import/if.py index 7f135e115..fd8a7267e 100644 --- a/test/python/importer/jit_ir/node_import/if.py +++ b/test/python/importer/jit_ir/node_import/if.py @@ -32,7 +32,7 @@ def prim_If(b: bool, i: int): else: return i * i -# CHECK-LABEL: func @__torch__.prim_If_derefine( +# CHECK-LABEL: func.func 
@__torch__.prim_If_derefine( # CHECK-SAME: %[[B:.*]]: !torch.bool, # CHECK-SAME: %[[I:.*]]: !torch.int) -> !torch.optional { # CHECK: %[[NONE:.*]] = torch.constant.none diff --git a/test/python/importer/jit_ir/node_import/list.py b/test/python/importer/jit_ir/node_import/list.py index b2b4a909e..9a09914e3 100644 --- a/test/python/importer/jit_ir/node_import/list.py +++ b/test/python/importer/jit_ir/node_import/list.py @@ -9,7 +9,7 @@ from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.f( +# CHECK-LABEL: func.func @__torch__.f( # CHECK-SAME: %[[T0:.*]]: !torch.tensor, # CHECK-SAME: %[[T1:.*]]: !torch.tensor) -> !torch.list { # CHECK: %[[RET:.*]] = torch.prim.ListConstruct %[[T0]], %[[T1]] : (!torch.tensor, !torch.tensor) -> !torch.list diff --git a/test/python/importer/jit_ir/node_import/loop.py b/test/python/importer/jit_ir/node_import/loop.py index 6ae05240b..e21f4c8c0 100644 --- a/test/python/importer/jit_ir/node_import/loop.py +++ b/test/python/importer/jit_ir/node_import/loop.py @@ -11,7 +11,7 @@ import typing mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.prim_Loop_forlike( +# CHECK-LABEL: func.func @__torch__.prim_Loop_forlike( # CHECK-SAME: %[[MAX_ITERATIONS:.*]]: !torch.int) -> !torch.float { # CHECK: %[[BOOL_TRUE:.*]] = torch.constant.bool true # CHECK: %[[F_INIT:.*]] = torch.constant.float 0.000000e+00 @@ -29,7 +29,7 @@ def prim_Loop_forlike(n: int): f += i return f -# CHECK-LABEL: func @__torch__.prim_Loop_whilelike( +# CHECK-LABEL: func.func @__torch__.prim_Loop_whilelike( # CHECK-SAME: %[[VAL_0:.*]]: !torch.int) -> !torch.float { # CHECK: %[[F_INIT:.*]] = torch.constant.float 3.200000e+00 # CHECK: %[[MAX_ITERATIONS:.*]] = torch.constant.int 9223372036854775807 @@ -49,7 +49,7 @@ def prim_Loop_whilelike(n: int): f = f * f return f -# CHECK-LABEL: func @__torch__.prim_Loop_derefine( +# CHECK-LABEL: func.func @__torch__.prim_Loop_derefine( # CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.optional { # CHECK: %[[TRUE:.*]] = torch.constant.bool true # CHECK: %[[NONE:.*]] = torch.constant.none diff --git a/test/python/importer/jit_ir/node_import/prim.py b/test/python/importer/jit_ir/node_import/prim.py index 885d35f00..21ec33c92 100644 --- a/test/python/importer/jit_ir/node_import/prim.py +++ b/test/python/importer/jit_ir/node_import/prim.py @@ -15,7 +15,7 @@ import typing mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.prim_NumToTensor( +# CHECK-LABEL: func.func @__torch__.prim_NumToTensor( # CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.tensor { # CHECK: %[[RET:.*]] = torch.prim.NumToTensor.Scalar %[[ARG]] : !torch.int -> !torch.tensor # CHECK: return %[[RET]] : !torch.tensor @@ -25,7 +25,7 @@ mb = ModuleBuilder() def prim_NumToTensor(i: int): return _to_tensor(i) -# CHECK-LABEL: func @__torch__.prim_Print( +# CHECK-LABEL: func.func @__torch__.prim_Print( # CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.none { # CHECK: %[[STR:.*]] = torch.constant.str "x" # CHECK: torch.prim.Print(%[[STR]], %[[ARG]]) : !torch.str, !torch.tensor @@ -34,7 +34,7 @@ def prim_NumToTensor(i: int): def prim_Print(x): print("x", x) -# CHECK-LABEL: func @__torch__.prim_RaiseException() -> !torch.none { +# CHECK-LABEL: func.func @__torch__.prim_RaiseException() -> !torch.none { # CHECK: %[[ERRORSTR:.*]] = torch.constant.str "Error" # CHECK: %[[NONE:.*]] = torch.prim.Uninitialized : !torch.none # CHECK: torch.prim.RaiseException %[[ERRORSTR]] @@ -44,7 +44,7 @@ def prim_Print(x): def prim_RaiseException(): raise 
Exception("Error") -# CHECK-LABEL: func @__torch__.prim_unchecked_cast( +# CHECK-LABEL: func.func @__torch__.prim_unchecked_cast( # CHECK-SAME: %[[ARG:.*]]: !torch.optional) -> !torch.int { # CHECK: %[[NONE:.*]] = torch.constant.none # CHECK: %[[C3:.*]] = torch.constant.int 3 @@ -63,7 +63,7 @@ def prim_unchecked_cast(i: typing.Optional[int]): return 3 return i -# CHECK-LABEL: func @__torch__.prim_TupleUnpack( +# CHECK-LABEL: func.func @__torch__.prim_TupleUnpack( # CHECK-SAME: %[[ARG:.*]]: !torch.tuple) -> !torch.int { # CHECK: %[[RET:.*]]:2 = torch.prim.TupleUnpack %[[ARG]] : !torch.tuple -> !torch.int, !torch.int # CHECK: return %[[RET]]#0 : !torch.int @@ -73,7 +73,7 @@ def prim_TupleUnpack(tup: typing.Tuple[int, int]): val, _ = tup return val -# CHECK-LABEL: func @__torch__.prim_TupleIndex( +# CHECK-LABEL: func.func @__torch__.prim_TupleIndex( # CHECK-SAME: %[[ARG:.*]]: !torch.tuple) -> !torch.tensor { # CHECK: %[[RET:.*]] = torch.prim.TupleIndex %[[ARG]], %[[IDX:.*]] : !torch.tuple, !torch.int -> !torch.tensor # CHECK: return %[[RET]] : !torch.tensor @@ -82,7 +82,7 @@ def prim_TupleUnpack(tup: typing.Tuple[int, int]): def prim_TupleIndex(tup: typing.Tuple[torch.Tensor, torch.Tensor]): return tup[0] -# CHECK-LABEL: func @__torch__.prim_ListUnpack( +# CHECK-LABEL: func.func @__torch__.prim_ListUnpack( # CHECK-SAME: %[[ARG:.*]]: !torch.list) -> !torch.int { # CHECK: %[[RET:.*]]:3 = torch.prim.ListUnpack %[[ARG]] : !torch.list -> !torch.int, !torch.int # CHECK: return %[[RET]]#1 : !torch.int @@ -92,7 +92,7 @@ def prim_ListUnpack(l: typing.List[int]): _, val, _ = l return val -# CHECK-LABEL: func @__torch__.prim_dtype( +# CHECK-LABEL: func.func @__torch__.prim_dtype( # CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.int { # CHECK: %[[RET:.*]] = torch.prim.dtype %[[ARG]] : !torch.tensor -> !torch.int # CHECK: return %[[RET]] : !torch.int @@ -101,7 +101,7 @@ def prim_ListUnpack(l: typing.List[int]): def prim_dtype(x): return x.dtype -# CHECK-LABEL: func @__torch__.prim_layout( +# CHECK-LABEL: func.func @__torch__.prim_layout( # CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.int { # CHECK: %[[RET:.*]] = torch.prim.layout %[[ARG]] : !torch.tensor -> !torch.int # CHECK: return %[[RET]] : !torch.int @@ -110,7 +110,7 @@ def prim_dtype(x): def prim_layout(x): return x.layout -# CHECK-LABEL: func @__torch__.prim_device( +# CHECK-LABEL: func.func @__torch__.prim_device( # CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.Device { # CHECK: %[[RET:.*]] = torch.prim.device %[[ARG]] : !torch.tensor -> !torch.Device # CHECK: return %[[RET]] : !torch.Device @@ -119,7 +119,7 @@ def prim_layout(x): def prim_device(x): return x.device -# CHECK-LABEL: func @__torch__.prim_min( +# CHECK-LABEL: func.func @__torch__.prim_min( # CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.tuple { # CHECK: %[[SINGLETON:.*]] = torch.prim.ListConstruct %[[ARG]] : (!torch.int) -> !torch.list # CHECK: %[[MIN1:.*]] = torch.prim.min.self_int %[[SINGLETON]] : !torch.list -> !torch.int @@ -133,7 +133,7 @@ def prim_device(x): def prim_min(x: int): return min(x), min(x,x), min(x, x, x) -# CHECK-LABEL: func @__torch__.prim_max( +# CHECK-LABEL: func.func @__torch__.prim_max( # CHECK-SAME: %[[ARG:.*]]: !torch.int) -> !torch.tuple { # CHECK: %[[SINGLETON:.*]] = torch.prim.ListConstruct %[[ARG]] : (!torch.int) -> !torch.list # CHECK: %[[MAX1:.*]] = torch.prim.max.self_int %[[SINGLETON]] : !torch.list -> !torch.int @@ -147,7 +147,7 @@ def prim_min(x: int): def prim_max(x: int): return max(x), max(x,x), max(x, x, x) -# CHECK-LABEL: func 
@__torch__.prim_Constant_list() -> !torch.list { +# CHECK-LABEL: func.func @__torch__.prim_Constant_list() -> !torch.list { # CHECK: %[[A:.*]] = torch.constant.int 1 # CHECK: %[[B:.*]] = torch.constant.int 2 # CHECK: %[[C:.*]] = torch.constant.int 3 diff --git a/test/python/importer/jit_ir/node_import/tuple.py b/test/python/importer/jit_ir/node_import/tuple.py index 2bd66d610..8e14b677f 100644 --- a/test/python/importer/jit_ir/node_import/tuple.py +++ b/test/python/importer/jit_ir/node_import/tuple.py @@ -14,7 +14,7 @@ mb = ModuleBuilder() NT = NamedTuple('NT', [('f1', Optional[torch.Tensor]), ('f2', Optional[torch.Tensor])]) -# CHECK-LABEL: func @__torch__.tuple( +# CHECK-LABEL: func.func @__torch__.tuple( # CHECK-SAME: %[[T0:.*]]: !torch.tensor, # CHECK-SAME: %[[T1:.*]]: !torch.tensor) -> # CHECK-SAME: !torch.tuple { @@ -27,7 +27,7 @@ def tuple(t0, t1): return t0, t1 -# CHECK-LABEL: func @__torch__.tuple_optional( +# CHECK-LABEL: func.func @__torch__.tuple_optional( # CHECK-SAME: %[[T0:.*]]: !torch.tensor, # CHECK-SAME: %[[T1:.*]]: !torch.tensor) -> # CHECK-SAME: !torch.tuple, optional> { @@ -44,7 +44,7 @@ def tuple_optional( return t0, t1 -# CHECK-LABEL: func @__torch__.namedtuple_optional( +# CHECK-LABEL: func.func @__torch__.namedtuple_optional( # CHECK-SAME: %[[T0:.*]]: !torch.tensor, # CHECK-SAME: %[[T1:.*]]: !torch.tensor) -> # CHECK-SAME: !torch.tuple, optional> { @@ -59,7 +59,7 @@ def namedtuple_optional( return NT(t0, t1) -# CHECK-LABEL: func @__torch__.tuple_construct_arg_needs_refinement( +# CHECK-LABEL: func.func @__torch__.tuple_construct_arg_needs_refinement( # CHECK-SAME: %[[T0:.*]]: !torch.tensor, # CHECK-SAME: %[[T1:.*]]: !torch.tensor) -> !torch.tuple { # CHECK: %[[T0_REFINED:.*]] = torch.tensor_static_info_cast %[[T1]] : !torch.tensor to !torch.tensor<[4],f32> diff --git a/test/python/importer/jit_ir/node_import/union.py b/test/python/importer/jit_ir/node_import/union.py index f87ba7665..691a8e413 100644 --- a/test/python/importer/jit_ir/node_import/union.py +++ b/test/python/importer/jit_ir/node_import/union.py @@ -11,7 +11,7 @@ from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder mb = ModuleBuilder() -# CHECK-LABEL: func @__torch__.f( +# CHECK-LABEL: func.func @__torch__.f( # CHECK-SAME: %{{.*}}: !torch.union) -> !torch.none { @mb.import_function