Bump llvm-project to a37cf17834d39411ed1d669098b428f8374c5b45

Changes:
- Changed operand ordering of `linalg.fill`: the fill value now comes before the output buffer/tensor, both in the C++ builder and in the textual IR (see the sketch below).
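
For reference, a minimal before/after sketch of the new operand order, distilled from the hunks below (value names such as %cst and %init are placeholders):

    // C++ builder: the fill value now precedes the output.
    // Old: rewriter.create<linalg::FillOp>(loc, initTensor, c0);
    // New:
    rewriter.create<linalg::FillOp>(loc, c0, initTensor);

    // Textual IR: the operand types swap accordingly.
    // Old: %6 = linalg.fill(%init, %cst) : tensor<?x?xf32>, f32 -> tensor<?x?xf32>
    %6 = linalg.fill(%cst, %init) : f32, tensor<?x?xf32> -> tensor<?x?xf32>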
Sean Silva 2021-06-23 10:03:29 -07:00
parent 90c6c64fd6
commit 145d4ae23c
6 changed files with 22 additions and 22 deletions

@@ -1 +1 @@
-Subproject commit 116841c623747972d0ae80239d3ea7b8409b868b
+Subproject commit a37cf17834d39411ed1d669098b428f8374c5b45


@@ -212,7 +212,7 @@ public:
    Value c0 =
        rewriter.create<ConstantOp>(loc, FloatAttr::get(elementType, 0.0));
    Value zeroFill =
-        rewriter.create<linalg::FillOp>(loc, initTensor, c0).getResult(0);
+        rewriter.create<linalg::FillOp>(loc, c0, initTensor).getResult(0);
    Value matmul = rewriter
                       .create<linalg::MatmulOp>(loc, zeroFill.getType(),
                                                 ValueRange{lhs, rhs}, zeroFill)


@@ -176,7 +176,7 @@ public:
    if (failed(resultsOrFailure))
      return failure();
    auto results = *resultsOrFailure;
-    rewriter.create<linalg::FillOp>(op.getLoc(), results[0], op.splatVal());
+    rewriter.create<linalg::FillOp>(op.getLoc(), op.splatVal(), results[0]);
    rewriter.replaceOp(op, results);
    return success();
  }
@@ -210,7 +210,7 @@ public:
      sizes.push_back(size);
      strides.push_back(stride);
    }
-    rewriter.create<linalg::FillOp>(op.getLoc(), results[0], op.fillVal());
+    rewriter.create<linalg::FillOp>(op.getLoc(), op.fillVal(), results[0]);
    auto unpadded = rewriter.create<memref::SubViewOp>(
        op.getLoc(), results[0], ValueRange(offsets), ValueRange(sizes),
        ValueRange(strides));


@@ -1,21 +1,21 @@
// RUN: npcomp-opt -npcomp-verify-backend-contract -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s
// CHECK: func @mm
func @mm(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> attributes {iree.module.export} {
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %cst = constant 0.000000e+00 : f32
  %0 = memref.dim %arg0, %c0 : tensor<?x?xf32>
  %1 = memref.dim %arg0, %c1 : tensor<?x?xf32>
  %2 = memref.dim %arg1, %c0 : tensor<?x?xf32>
  %3 = memref.dim %arg1, %c1 : tensor<?x?xf32>
  %4 = cmpi eq, %1, %2 : index
  assert %4, "mismatching contracting dimension for aten.mm"
  %5 = linalg.init_tensor [%0, %3] : tensor<?x?xf32>
-  %6 = linalg.fill(%5, %cst) : tensor<?x?xf32>, f32 -> tensor<?x?xf32>
+  %6 = linalg.fill(%cst, %5) : f32, tensor<?x?xf32> -> tensor<?x?xf32>
  %7 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %7 : tensor<?x?xf32>
}
// -----


@@ -17,7 +17,7 @@
// CHECK: assert %[[EQ]], "mismatching contracting dimension for torch.aten.mm"
// CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor [%[[LHS_DIM_0]], %[[RHS_DIM_1]]] : tensor<?x?xf32>
// CHECK: %[[CF0:.*]] = constant 0.000000e+00 : f32
-// CHECK: %[[ZEROFILL:.*]] = linalg.fill(%[[INIT_TENSOR]], %[[CF0]]) : tensor<?x?xf32>, f32 -> tensor<?x?xf32>
+// CHECK: %[[ZEROFILL:.*]] = linalg.fill(%[[CF0]], %[[INIT_TENSOR]]) : f32, tensor<?x?xf32> -> tensor<?x?xf32>
// CHECK: %[[MATMUL:.*]] = linalg.matmul ins(%[[LHS]], %[[RHS]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ZEROFILL]] : tensor<?x?xf32>) -> tensor<?x?xf32>
// CHECK: %[[CASTED:.*]] = tensor.cast %[[MATMUL]] : tensor<?x?xf32> to tensor<?x2xf32>
// CHECK: %[[RESULT_VTENSOR:.*]] = torch.from_builtin_tensor %[[CASTED]] : tensor<?x2xf32> -> !torch.vtensor<[?,2],f32>


@@ -18,7 +18,7 @@ func @tcp_broadcast_to(%arg0: tensor<?xf32>, %arg1: tensor<?xindex>) -> tensor<?
// CHECK-SAME: %[[SPLAT_VAL:.*]]: f32,
// CHECK-SAME: %[[SHAPE:.*]]: tensor<?xindex>) -> tensor<?x?xf32> {
// CHECK: %[[RESULT:.*]] = refback.alloc_memref %[[SHAPE]] : memref<?x?xf32>
-// CHECK: linalg.fill(%[[RESULT]], %[[SPLAT_VAL]]) : memref<?x?xf32>, f32
+// CHECK: linalg.fill(%[[SPLAT_VAL]], %[[RESULT]]) : f32, memref<?x?xf32>
// CHECK: %[[RESULT_TENSOR:.*]] = memref.tensor_load %[[RESULT]] : memref<?x?xf32>
// CHECK: return %[[RESULT_TENSOR]] : tensor<?x?xf32>
func @tcp_splatted(%arg0: f32, %arg1: tensor<?xindex>) -> tensor<?x?xf32> {
@@ -48,7 +48,7 @@ func @tcp_splatted(%arg0: f32, %arg1: tensor<?xindex>) -> tensor<?x?xf32> {
// CHECK: %[[LOWER_EXTENT_D1_1:.*]] = tensor.extract %[[LOWER_EXPANSION]][%[[C0_1]]] : tensor<?xindex>
// CHECK: %[[C0_2:.*]] = constant 0 : index
// CHECK: %[[D1_1:.*]] = memref.dim %[[TENSOR]], %[[C0_2]] : tensor<?xf32>
-// CHECK: linalg.fill(%[[D1_OUT_MREF]], %[[FILL_VAL]]) : memref<?xf32>, f32
+// CHECK: linalg.fill(%[[FILL_VAL]], %[[D1_OUT_MREF]]) : f32, memref<?xf32>
// CHECK: %[[SUBVIEW:.*]] = memref.subview %[[D1_OUT_MREF]][%[[LOWER_EXTENT_D1_1]]] [%[[D1_1]]] [%[[C1]]] : memref<?xf32> to memref<?xf32, #map>
// CHECK: linalg.copy(%0, %[[SUBVIEW]]) : memref<?xf32>, memref<?xf32, #map>
// CHECK: %[[RESULT_TENSOR:.*]] = memref.tensor_load %[[D1_OUT_MREF]] : memref<?xf32>