mirror of https://github.com/llvm/torch-mlir
Tidy up test/E2E
- Make rank1.mlir be the new "basic.mlir", as it is really the simplest case.
- Move basic.mlir to mixed-ranks.mlir.
- Delete starting-from-linalg.mlir; it wasn't really useful anymore.
parent eaeb4011e6
commit 889fe0d6c2

test/E2E/basic.mlir
@@ -1,19 +1,10 @@
// RUN: npcomp-opt <%s -pass-pipeline=e2e-lowering-pipeline | FileCheck %s --dump-input=fail

// This is the simplest case, which is easy to stare at for debugging
// purposes.

// CHECK-LABEL: func @rank1
func @rank1(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// CHECK-LABEL: func @rank2
func @rank2(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// CHxCK-LABEL: func @rank1and2
func @rank1and2(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

test/E2E/mixed-ranks.mlir
@@ -0,0 +1,19 @@
// RUN: npcomp-opt <%s -pass-pipeline=e2e-lowering-pipeline | FileCheck %s --dump-input=fail

// CHECK-LABEL: func @rank1
func @rank1(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// CHECK-LABEL: func @rank2
func @rank2(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// CHxCK-LABEL: func @rank1and2
func @rank1and2(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

test/E2E/rank1.mlir
@@ -1,10 +0,0 @@
// RUN: npcomp-opt <%s -pass-pipeline=e2e-lowering-pipeline | FileCheck %s --dump-input=fail

// This is the simplest case, which is easy to stare at for debugging
// purposes.

// CHECK-LABEL: func @rank1
func @rank1(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
  %0 = "tcf.add"(%arg0, %arg1) : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

test/E2E/starting-from-linalg.mlir
@@ -1,22 +0,0 @@
// RUN: npcomp-opt -lower-to-hybrid-tensor-memref-pipeline <%s | FileCheck %s --dump-input=fail

#map0 = affine_map<(d0) -> (d0)>
func @f(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
  %0 = "shape.shape_of"(%arg0) : (tensor<?xf32>) -> !shape.shape
  %1 = "shape.shape_of"(%arg1) : (tensor<?xf32>) -> !shape.shape
  %2 = "shape.broadcast"(%0, %1) : (!shape.shape, !shape.shape) -> !shape.shape
  %3 = "shape.abort_if_error"(%2) : (!shape.shape) -> none
  %4 = "tcp.island"(%3) ( {
    %5 = "tcp.broadcast_to"(%arg0, %2) : (tensor<?xf32>, !shape.shape) -> tensor<?xf32>
    %6 = "tcp.broadcast_to"(%arg1, %2) : (tensor<?xf32>, !shape.shape) -> tensor<?xf32>
    // CHECK: tcp.alloc_memref
    %7 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel"]} %5, %6 {
    ^bb0(%arg2: f32, %arg3: f32): // no predecessors
      %8 = addf %arg2, %arg3 : f32
      linalg.yield %8 : f32
    }: tensor<?xf32>, tensor<?xf32> -> tensor<?xf32>
    "tcp.yield"(%7) : (tensor<?xf32>) -> ()
  }) : (none) -> tensor<?xf32>
  return %4 : tensor<?xf32>
}
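
Usage note: lit expands %s in the RUN lines above to the path of the test file itself. The lines below are a minimal sketch of running one of these tests by hand, assuming npcomp-opt and FileCheck are on PATH; the test path shown is illustrative.

# Sketch (assumption): manual equivalent of the RUN line in basic.mlir,
# with lit's %s substituted by an illustrative path.
npcomp-opt <test/E2E/basic.mlir -pass-pipeline=e2e-lowering-pipeline \
  | FileCheck test/E2E/basic.mlir --dump-input=fail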