// RUN: npcomp-opt <%s -convert-tcf-to-linalg | FileCheck %s --dump-input=fail

// Verifies the TCF->Linalg lowering of tcf.matmul:
// - the contracting dimensions (lhs dim 1, rhs dim 0) are checked for
//   equality and guarded by a shape.cstr_require witness,
// - inside the shape.assuming region, a zero-filled init tensor of shape
//   [lhs rows, rhs cols] is created via tcp.splatted and consumed by
//   linalg.matmul.
// CHECK-LABEL:   func @tcf_matmul(
// CHECK-SAME:                     %[[LHS:.*]]: tensor<?x?xf32>,
// CHECK-SAME:                     %[[RHS:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK:           %[[C0F32:.*]] = constant 0.000000e+00 : f32
// CHECK:           %[[C0:.*]] = constant 0 : index
// CHECK:           %[[C1:.*]] = constant 1 : index
// CHECK:           %[[LHSK:.*]] = dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
// CHECK:           %[[RHSK:.*]] = dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
// CHECK:           %[[KEQUAL:.*]] = cmpi "eq", %[[LHSK]], %[[RHSK]] : index
// CHECK:           %[[WITNESS:.*]] = shape.cstr_require %[[KEQUAL]], "mismatching contracting dimension for matmul"
// CHECK:           %[[RET:.*]] = shape.assuming %[[WITNESS]] -> (tensor<?x?xf32>) {
// CHECK:             %[[LHSROWS:.*]] = dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
// CHECK:             %[[RHSCOLS:.*]] = dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
// CHECK:             %[[SHAPE:.*]] = tensor_from_elements %[[LHSROWS]], %[[RHSCOLS]] : tensor<2xindex>
// CHECK:             %[[INIT_TENSOR:.*]] = tcp.splatted %[[C0F32]], %[[SHAPE]] : (f32, tensor<2xindex>) -> tensor<?x?xf32>
// CHECK:             %[[MATMUL:.*]] = linalg.matmul ins(%[[LHS]], %[[RHS]] : tensor<?x?xf32>, tensor<?x?xf32>) init(%[[INIT_TENSOR]] : tensor<?x?xf32>) -> tensor<?x?xf32>
// CHECK:             shape.assuming_yield %[[MATMUL]] : tensor<?x?xf32>
// CHECK:           }
// CHECK:           return %[[RET:.*]] : tensor<?x?xf32>
func @tcf_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = tcf.matmul %arg0, %arg1 : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}