// RUN: npcomp-opt -lower-shaped-results-to-memref <%s -split-input-file | FileCheck %s --dump-input=fail

// CHECK-LABEL: func @tcp_broadcast_to
func @tcp_broadcast_to(%arg0: tensor<?xf32>, %arg1: tensor<?xindex>) -> tensor<?xf32> {
  // Check for two nested loops, but don't look at more detail for now.
  // TODO: This pass should not create loops. Instead it should create a
  // buffer version of tcp.broadcast_to
  // CHECK: scf.for
  // CHECK: scf.for
  // CHECK-NOT: tcp.shaped_results
  %0 = tcp.shaped_results %arg1 {
    %0 = tcp.broadcast_to %arg0, %arg1 : (tensor<?xf32>, tensor<?xindex>) -> tensor<?xf32>
    tcp.yield %0 : tensor<?xf32>
  } : tensor<?xindex> -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// CHECK-LABEL:   func @tcp_add(
// CHECK-SAME:                  %arg0: tensor<?xf32>,
// CHECK-SAME:                  %arg1: tensor<?xf32>) -> tensor<?xf32> {
// CHECK:           %[[LHSSHAPE:.*]] = shape.shape_of %arg0 : tensor<?xf32> -> tensor<?xindex>
// CHECK:           %[[LHS:.*]] = tcp.tensor_to_memref %arg0 : tensor<?xf32> -> memref<?xf32>
// CHECK:           %[[RHS:.*]] = tcp.tensor_to_memref %arg1 : tensor<?xf32> -> memref<?xf32>
// CHECK:           %[[RESULT:.*]] = tcp.alloc_memref %[[LHSSHAPE]] : memref<?xf32>
// CHECK:           linalg.generic {{.*}} ins(%[[LHS]], %[[RHS]] {{.*}}) outs(%[[RESULT]] {{.*}}) {
// CHECK:           ^bb0(%[[VAL_6:.*]]: f32, %[[VAL_7:.*]]: f32, %[[VAL_8:.*]]: f32):
// CHECK:             %[[VAL_9:.*]] = addf %[[VAL_6]], %[[VAL_7]] : f32
// CHECK:             linalg.yield %[[VAL_9]] : f32
// CHECK:           }
// CHECK:           %[[RET:.*]] = tcp.memref_to_tensor %[[RESULT]] : memref<?xf32> -> tensor<?xf32>
// CHECK:           return %[[RET]] : tensor<?xf32>
// CHECK:         }
func @tcp_add(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
  %0 = shape.shape_of %arg0 : tensor<?xf32> -> tensor<?xindex>
  %1 = tcp.shaped_results %0 {
    %2 = tcp.add %arg0, %arg1 : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
    tcp.yield %2 : tensor<?xf32>
  } : tensor<?xindex> -> tensor<?xf32>
  return %1 : tensor<?xf32>
}

// -----

// Check just the linalg body. The code is otherwise shared with tcp.add.
// CHECK-LABEL: func @tcp_max
// CHECK:      ^bb0(%[[LHS:.*]]: f32, %[[RHS:.*]]: f32, %[[DST:.*]]: f32):
// CHECK:        %[[GREATER:.*]] = cmpf "ogt", %[[LHS]], %[[RHS]] : f32
// CHECK:        %[[MAX:.*]] = select %[[GREATER]], %[[LHS]], %[[RHS]] : f32
// CHECK:        linalg.yield %[[MAX]] : f32
func @tcp_max(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
  %0 = shape.shape_of %arg0 : tensor<?xf32> -> tensor<?xindex>
  %1 = tcp.shaped_results %0 {
    %2 = tcp.max %arg0, %arg1 : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
    tcp.yield %2 : tensor<?xf32>
  } : tensor<?xindex> -> tensor<?xf32>
  return %1 : tensor<?xf32>
}

// -----

// CHECK-LABEL:   func @tcp_matmul(
// CHECK-SAME:                     %arg0: tensor<?x?xf32>,
// CHECK-SAME:                     %arg1: tensor<?x?xf32>,
// CHECK-SAME:                     %[[SHAPE:.*]]: tensor<?xindex>) -> tensor<?x?xf32> {
// CHECK:           %[[LHS:.*]] = tcp.tensor_to_memref %arg0 : tensor<?x?xf32> -> memref<?x?xf32>
// CHECK:           %[[RHS:.*]] = tcp.tensor_to_memref %arg1 : tensor<?x?xf32> -> memref<?x?xf32>
// CHECK:           %[[RESULT:.*]] = tcp.alloc_memref %[[SHAPE]] : memref<?x?xf32>
// CHECK:           %[[C0:.*]] = constant 0.000000e+00 : f32
// CHECK:           linalg.fill(%2, %[[C0]]) : memref<?x?xf32>, f32
// CHECK:           linalg.matmul ins(%[[LHS]], %[[RHS]] : memref<?x?xf32>, memref<?x?xf32>) outs(%[[RESULT]] : memref<?x?xf32>)
// CHECK:           %[[RET:.*]] = tcp.memref_to_tensor %[[RESULT]] : memref<?x?xf32> -> tensor<?x?xf32>
// CHECK:           return %[[RET]] : tensor<?x?xf32>
// CHECK:         }
func @tcp_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %shape: tensor<?xindex>) -> tensor<?x?xf32> {
  %0 = tcp.shaped_results %shape {
    %matmul = tcp.matmul %arg0, %arg1 : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
    tcp.yield %matmul : tensor<?x?xf32>
  } : tensor<?xindex> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}