mirror of https://github.com/llvm/torch-mlir
Bump llvm to f9031f00f2c9 (#3672)
As title

---------

Co-authored-by: Muhammad Abubakar <jane.doe@getcruise.com>

parent 5bc59ce1fa
commit 98e08023bb
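Besides moving the llvm-project submodule pointer, the hunks below adjust FileCheck expectations in the conversion and bufferization tests: pairs of value-defining checks whose relative order is no longer guaranteed after the bump switch from CHECK-NEXT/CHECK to CHECK-DAG, and the two torch.aten.contiguous tests now expect the op to fold away entirely. CHECK-DAG lets a consecutive group of directives match in any order while still being bounded by the surrounding ordered checks; a minimal sketch of the idea (value names are illustrative, not copied from any one test):

// Either emission order of the two from_f64 ops satisfies this group:
// CHECK-DAG: %[[A:.*]] = torch_c.from_f64 %[[LOOP]]#0
// CHECK-DAG: %[[B:.*]] = torch_c.from_f64 %[[LOOP]]#1
// The next ordered check closes the DAG group, so both lines must
// appear before the return:
// CHECK: return %[[A]], %[[B]]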
@@ -1 +1 @@
-Subproject commit 585523750e2bbe374d1cb3bf4ff9d53de29b9593
+Subproject commit f9031f00f2c90bc0af274b45ec3e169b5250a688

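This first hunk is the llvm-project submodule pointer itself, moving from 585523750e2b to the f9031f00f2c9 revision named in the commit title.
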
@@ -124,8 +124,8 @@ func.func @torch.prim.loop$while(%arg0: !torch.int) -> !torch.float {
 // CHECK-NEXT: %[[VAL_1:.*]] = torch_c.to_f64 %[[TORCH_VAL_1]]
 // CHECK-NEXT: scf.yield %[[BLOCK_CONDITION]], %[[VAL_0]], %[[VAL_1]] : i1, f64, f64
 // CHECK-NEXT: }
-// CHECK-NEXT: %[[TORCH_LOOP_0:.*]] = torch_c.from_f64 %[[LOOP]]#0
-// CHECK-NEXT: %[[TORCH_LOOP_1:.*]] = torch_c.from_f64 %[[LOOP]]#1
+// CHECK-DAG: %[[TORCH_LOOP_0:.*]] = torch_c.from_f64 %[[LOOP]]#0
+// CHECK-DAG: %[[TORCH_LOOP_1:.*]] = torch_c.from_f64 %[[LOOP]]#1
 // CHECK-NEXT: return %[[TORCH_LOOP_0]], %[[TORCH_LOOP_1]] : !torch.float, !torch.float
 func.func @torch.prim.loop$while_with_multiple_values() -> (!torch.float, !torch.float) {
   %float3.200000e00 = torch.constant.float 3.200000e+00

@@ -198,8 +198,8 @@ func.func @torch.prim.Loop$for(%arg0: !torch.int) -> !torch.float {
 // CHECK-NEXT: %[[VAL_1:.*]] = torch_c.to_f64 %[[TORCH_VAL_1]]
 // CHECK-NEXT: scf.yield %[[VAL_0]], %[[VAL_1]] : f64, f64
 // CHECK-NEXT: }
-// CHECK-NEXT: %[[RETURN_0:.*]] = torch_c.from_f64 %[[LOOP]]#0
-// CHECK-NEXT: %[[RETURN_1:.*]] = torch_c.from_f64 %[[LOOP]]#1
+// CHECK-DAG: %[[RETURN_0:.*]] = torch_c.from_f64 %[[LOOP]]#0
+// CHECK-DAG: %[[RETURN_1:.*]] = torch_c.from_f64 %[[LOOP]]#1
 // CHECK-NEXT: return %[[RETURN_0]], %[[RETURN_1]] : !torch.float, !torch.float
 // CHECK-NEXT: }
 func.func @torch.prim.Loop$for_with_multiple_results(%arg0: !torch.int) -> (!torch.float, !torch.float) {

@@ -40,10 +40,8 @@ func.func @torch.prim.NumToTensor.Scalar$basic() -> !torch.vtensor<[], si64> {

 // CHECK-LABEL: func.func @torch.aten.contiguous(
 // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[4,64],f32>) -> !torch.vtensor<[4,64],f32> {
-// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[4,64],f32> -> tensor<4x64xf32>
 // CHECK: %int0 = torch.constant.int 0
-// CHECK: %[[VAL_2:.*]] = torch_c.from_builtin_tensor %[[VAL_1]] : tensor<4x64xf32> -> !torch.vtensor<[4,64],f32>
-// CHECK: return %[[VAL_2]] : !torch.vtensor<[4,64],f32>
+// CHECK: return %[[VAL_0]] : !torch.vtensor<[4,64],f32>
 func.func @torch.aten.contiguous(%arg0: !torch.vtensor<[4,64],f32>) -> !torch.vtensor<[4,64],f32> {
   %int0 = torch.constant.int 0
   %0 = torch.aten.contiguous %arg0, %int0 : !torch.vtensor<[4,64],f32>, !torch.int -> !torch.vtensor<[4,64],f32>

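In this hunk and the matching one at -801 below, the updated lowering no longer materializes a builtin-tensor round-trip for torch.aten.contiguous: the op is treated as a no-op and the expected IR returns the original argument directly. Schematically, a before/after sketch of the expected output (condensed from the checks, not literal pass output):

// before: %1 = torch_c.to_builtin_tensor %arg0
//         %2 = torch_c.from_builtin_tensor %1
//         return %2
// after:  return %arg0
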
@@ -103,8 +103,8 @@ func.func @torch.aten.max_pool2d$padding(%arg0: !torch.vtensor<[?,?,?,?],f32>) -
 // CHECK: %[[T21:.*]] = stablehlo.select %[[T18]], %[[T19]], %[[T20]] : tensor<i1>, tensor<i64>
 // CHECK: stablehlo.return %[[T17]], %[[T21]] : tensor<f32>, tensor<i64>
 // CHECK: }) : (tensor<?x?x?xf32>, tensor<?x?x?xi64>, tensor<f32>, tensor<i64>) -> (tensor<?x?x?xf32>, tensor<?x?x?xi64>)
-// CHECK: %[[T14:.*]] = torch_c.from_builtin_tensor %[[T13]]#0 : tensor<?x?x?xf32> -> !torch.vtensor<[?,?,?],f32>
-// CHECK: %[[T15:.*]] = torch_c.from_builtin_tensor %[[T13]]#1 : tensor<?x?x?xi64> -> !torch.vtensor<[?,?,?],si64>
+// CHECK-DAG: %[[T14:.*]] = torch_c.from_builtin_tensor %[[T13]]#0 : tensor<?x?x?xf32> -> !torch.vtensor<[?,?,?],f32>
+// CHECK-DAG: %[[T15:.*]] = torch_c.from_builtin_tensor %[[T13]]#1 : tensor<?x?x?xi64> -> !torch.vtensor<[?,?,?],si64>
 // CHECK: return %[[T14]], %[[T15]] : !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,?],si64>
 func.func @torch.aten.max_pool2d_with_indices(%arg0: !torch.vtensor<[?,?,?],f32>) -> (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?,?],si64>) {
   %int3 = torch.constant.int 3

@@ -801,10 +801,8 @@ func.func @torch.aten.unsqueeze$negative_dim(%arg0: !torch.vtensor<[4,3],si32> )

 // CHECK-LABEL: func.func @torch.aten.contiguous$basic(
 // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> {
-// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
 // CHECK: %[[VAL_2:.*]] = torch.constant.int 0
-// CHECK: %[[VAL_3:.*]] = torch_c.from_builtin_tensor %[[VAL_1]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
-// CHECK: return %[[VAL_3]] : !torch.vtensor<[?,?],f32>
+// CHECK: return %[[VAL_0]] : !torch.vtensor<[?,?],f32>
 // CHECK: }
 func.func @torch.aten.contiguous$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch.vtensor<[?,?],f32> {
   %int0 = torch.constant.int 0

@@ -13,8 +13,8 @@
 // CHECK: %[[OUT_CURRENT_ELEMENT:.*]] = arith.addi %[[OUT_PREV_ELEMENT]], %[[IN_ELEMENT]] : i32
 // CHECK: tm_tensor.yield %[[OUT_CURRENT_ELEMENT]] : i32
 // CHECK: }
-// CHECK: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32>
-// CHECK: %[[ACC_TENSOR_NEW:.*]] = bufferization.to_tensor %[[ACC_MEMREF_NEW]] : memref<i32>
+// CHECK-DAG: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32>
+// CHECK-DAG: %[[ACC_TENSOR_NEW:.*]] = bufferization.to_tensor %[[ACC_MEMREF_NEW]] : memref<i32>
 // CHECK: return %[[OUT_TENSOR_NEW]], %[[ACC_TENSOR_NEW]] : tensor<128xi32>, tensor<i32>
 func.func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tensor<i32>) -> (tensor<128xi32>, tensor<i32>) {
   %ret_out, %ret_acc = tm_tensor.scan dimension(0) inclusive(true)

@@ -41,8 +41,8 @@ func.func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc:
 // CHECK: %[[OUT_CURRENT_ELEMENT:.*]] = arith.addi %[[OUT_PREV_ELEMENT]], %[[IN_ELEMENT]] : i32
 // CHECK: tm_tensor.yield %[[OUT_CURRENT_ELEMENT]] : i32
 // CHECK: }
-// CHECK: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32>
-// CHECK: %[[ACC_TENSOR_NEW:.*]] = bufferization.to_tensor %[[ACC_MEMREF_NEW]] : memref<i32>
+// CHECK-DAG: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32>
+// CHECK-DAG: %[[ACC_TENSOR_NEW:.*]] = bufferization.to_tensor %[[ACC_MEMREF_NEW]] : memref<i32>
 // CHECK: return %[[OUT_TENSOR_NEW]], %[[ACC_TENSOR_NEW]] : tensor<128xi32>, tensor<i32>
 func.func @scan_1d_exclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc: tensor<i32>) -> (tensor<128xi32>, tensor<i32>) {
   %ret_out, %ret_acc = tm_tensor.scan dimension(0) inclusive(false)