Bump llvm-project to da289a174fc6617c7be37be2947480510fd4f02a

- Build adjustments for `.cpp.inc` dialect files.
- Renaming of `memref.dim` to `tensor.dim` for the tensor case.
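A minimal sketch of the dim-op change at a typical call site (the helper below is hypothetical, not code from this patch; only the `tensor::DimOp` builder call reflects what the patch actually switches to):

```cpp
#include "mlir/Dialect/Tensor/IR/Tensor.h" // was: mlir/Dialect/MemRef/IR/MemRef.h
#include "mlir/IR/Builders.h"

using namespace mlir;

// Query the size of dimension `dim` of a tensor-typed value.
// Before this bump: builder.create<memref::DimOp>(loc, operand, dim)
// After this bump:  builder.create<tensor::DimOp>(loc, operand, dim)
static Value getTensorDim(OpBuilder &builder, Location loc, Value operand,
                          int64_t dim) {
  return builder.create<tensor::DimOp>(loc, operand, dim);
}
```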

Minor changes:
- Renaming of `mlir::linalg::ReassociationIndices` to
  `mlir::ReassociationIndices`.
- Adjust command line option parsing in npcomp-run-mlir.
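On the last point, the gist of the npcomp-run-mlir change (see the final hunk below) is ordering: the MLIR option registrations and the tool's `Options` struct have to exist before `llvm::cl::ParseCommandLineOptions` runs, so they were hoisted to the top of `main`. A condensed, hypothetical sketch of that ordering (the real tool's `Options` struct has more fields):

```cpp
#include <string>

#include "llvm/Support/CommandLine.h"
#include "mlir/IR/AsmState.h"      // mlir::registerAsmPrinterCLOptions
#include "mlir/IR/MLIRContext.h"   // mlir::registerMLIRContextCLOptions
#include "mlir/Pass/PassManager.h" // mlir::registerPassManagerCLOptions

namespace {
// Stand-in for the tool's real Options struct; its cl::opt members must be
// constructed before the command line is parsed.
struct Options {
  llvm::cl::opt<std::string> inputFile{llvm::cl::Positional,
                                       llvm::cl::desc("<input file>")};
};
} // namespace

int main(int argc, char **argv) {
  // Register every category of MLIR command line options first...
  mlir::registerAsmPrinterCLOptions();
  mlir::registerMLIRContextCLOptions();
  mlir::registerPassManagerCLOptions();
  Options options;
  // ...and only then parse, so the parser knows about all of them.
  llvm::cl::ParseCommandLineOptions(argc, argv, "npcomp compile+run utility\n");
  (void)options;
  return 0;
}
```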
Sean Silva 2021-07-07 11:55:31 -07:00
parent ef118eb1e1
commit 83b5b5456d
22 changed files with 86 additions and 70 deletions

@@ -1 +1 @@
-Subproject commit a37cf17834d39411ed1d669098b428f8374c5b45
+Subproject commit da289a174fc6617c7be37be2947480510fd4f02a


@@ -5,6 +5,7 @@ set(LLVM_TARGET_DEFINITIONS ${dialect}.td)
mlir_tablegen(${dialect}.h.inc -gen-op-decls)
mlir_tablegen(${dialect}.cpp.inc -gen-op-defs)
mlir_tablegen(${dialect}Dialect.h.inc -gen-dialect-decls -dialect=${dialect_namespace})
+mlir_tablegen(${dialect}Dialect.cpp.inc -gen-dialect-defs -dialect=${dialect_namespace})
mlir_tablegen(${dialect}Enums.h.inc -gen-enum-decls)
mlir_tablegen(${dialect}Enums.cpp.inc -gen-enum-defs)
add_public_tablegen_target(MLIR${dialect}IncGen)


@@ -2,6 +2,7 @@ set(LLVM_TARGET_DEFINITIONS TorchOps.td)
mlir_tablegen(TorchOps.h.inc -gen-op-decls)
mlir_tablegen(TorchOps.cpp.inc -gen-op-defs)
mlir_tablegen(TorchDialect.h.inc -gen-dialect-decls -dialect=torch)
+mlir_tablegen(TorchDialect.cpp.inc -gen-dialect-defs -dialect=torch)
add_public_tablegen_target(MLIRTorchOpsIncGen)
add_dependencies(mlir-headers MLIRTorchOpsIncGen)


@@ -14,7 +14,7 @@ add_npcomp_library(NPCOMPCommonBackend
LINK_LIBS PUBLIC
MLIRIR
MLIRLinalg
-MLIRMemRef
+MLIRTensor
MLIRStandard
MLIRMath
)


@@ -9,7 +9,7 @@
#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Transforms/DialectConversion.h"
#include "npcomp/Backend/Common/Passes.h"
@@ -60,7 +60,7 @@ class VerifyBackendContractPass
target.addDynamicallyLegalDialect<linalg::LinalgDialect>(opHasLegalTypes);
target.addDynamicallyLegalDialect<tensor::TensorDialect>(opHasLegalTypes);
// DimOp is used to query tensor sizes.
-target.addDynamicallyLegalOp<memref::DimOp>(opHasLegalTypes);
+target.addDynamicallyLegalOp<tensor::DimOp>(opHasLegalTypes);
// AssertOp is used to terminate the program for error guards.
target.addLegalOp<AssertOp>();


@@ -15,6 +15,6 @@ add_npcomp_conversion_library(NPCOMPTCFToLinalg
MLIRPass
MLIRTransforms
MLIRShape
-MLIRMemRef
+MLIRTensor
NPCOMPTCFDialect
)


@@ -14,8 +14,7 @@
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Traits.h"
-// TODO: Remove when memref.dim is split into tensor.dim for the tensor case.
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "npcomp/Dialect/TCF/IR/TCFOps.h"
@@ -29,8 +28,8 @@ static SmallVector<Value, 6> bypassResultShapes(Operation *op,
OpBuilder &builder) {
if (auto matmul = dyn_cast<tcf::MatmulOp>(op)) {
-auto lhsRows = builder.create<memref::DimOp>(op->getLoc(), matmul.lhs(), 0);
-auto rhsCols = builder.create<memref::DimOp>(op->getLoc(), matmul.rhs(), 1);
+auto lhsRows = builder.create<tensor::DimOp>(op->getLoc(), matmul.lhs(), 0);
+auto rhsCols = builder.create<tensor::DimOp>(op->getLoc(), matmul.rhs(), 1);
auto shape = builder.create<tensor::FromElementsOp>(
op->getLoc(), ValueRange({lhsRows, rhsCols}));
return {shape};
@@ -52,17 +51,17 @@ static SmallVector<Value, 6> bypassResultShapes(Operation *op,
auto paddingHeight = padding;
auto paddingWidth = padding;
auto batch =
-builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.in(), 0);
+builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.in(), 0);
auto height =
-builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.in(), 2);
+builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.in(), 2);
auto width =
-builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.in(), 3);
+builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.in(), 3);
auto filterOutChannels =
-builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.filter(), 0);
+builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.filter(), 0);
auto filterHeight =
-builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.filter(), 2);
+builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.filter(), 2);
auto filterWidth =
-builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.filter(), 3);
+builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.filter(), 3);
// Output height
auto twicePaddingHeight = builder.create<MulIOp>(op->getLoc(), paddingHeight, cI2);
auto heightPlusTwicePadding = builder.create<SubIOp>(op->getLoc(), height, twicePaddingHeight);
@@ -99,8 +98,8 @@ public:
LogicalResult matchAndRewrite(tcf::MatmulOp op,
PatternRewriter &rewriter) const override {
// Create the constraints, and the assuming region.
-Value lhsK = rewriter.create<memref::DimOp>(op.getLoc(), op.lhs(), 1);
-Value rhsK = rewriter.create<memref::DimOp>(op.getLoc(), op.rhs(), 0);
+Value lhsK = rewriter.create<tensor::DimOp>(op.getLoc(), op.lhs(), 1);
+Value rhsK = rewriter.create<tensor::DimOp>(op.getLoc(), op.rhs(), 0);
Value matchingK =
rewriter.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq, lhsK, rhsK);
Value witness = rewriter.create<shape::CstrRequireOp>(
@@ -138,15 +137,15 @@ public:
LogicalResult matchAndRewrite(tcf::ConvNCHWOp op,
PatternRewriter &rewriter) const override {
// Create the constraints, and the assuming region.
-Value inputCin = rewriter.create<memref::DimOp>(op.getLoc(), op.in(), 1);
-Value inputH = rewriter.create<memref::DimOp>(op.getLoc(), op.in(), 2);
-Value inputW = rewriter.create<memref::DimOp>(op.getLoc(), op.in(), 3);
+Value inputCin = rewriter.create<tensor::DimOp>(op.getLoc(), op.in(), 1);
+Value inputH = rewriter.create<tensor::DimOp>(op.getLoc(), op.in(), 2);
+Value inputW = rewriter.create<tensor::DimOp>(op.getLoc(), op.in(), 3);
Value filterCin =
-rewriter.create<memref::DimOp>(op.getLoc(), op.filter(), 1);
+rewriter.create<tensor::DimOp>(op.getLoc(), op.filter(), 1);
Value filterKH =
-rewriter.create<memref::DimOp>(op.getLoc(), op.filter(), 2);
+rewriter.create<tensor::DimOp>(op.getLoc(), op.filter(), 2);
Value filterKW =
-rewriter.create<memref::DimOp>(op.getLoc(), op.filter(), 3);
+rewriter.create<tensor::DimOp>(op.getLoc(), op.filter(), 3);
Value matchingCin =
rewriter.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq, inputCin, filterCin);
Value validFilterH =


@@ -11,7 +11,7 @@
#include "../PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h" // TODO: For `memref.dim`.
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Traits.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -109,9 +109,9 @@ public:
rewriter.getStringAttr("training is not supported for now"));
// num_features C from an expected input of size (N,C,D,H,W ...)
-Value numFeatures = rewriter.create<memref::DimOp>(loc, input, 1);
+Value numFeatures = rewriter.create<tensor::DimOp>(loc, input, 1);
auto contractingDim0EqualsNumFeatures = [&](Value v) {
-auto dim0 = rewriter.create<memref::DimOp>(loc, v, 0);
+auto dim0 = rewriter.create<tensor::DimOp>(loc, v, 0);
auto dim0Equal =
rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, numFeatures, dim0);
rewriter.create<AssertOp>(
@@ -195,10 +195,10 @@ public:
op, "expected both operands to aten.mm to be rank 2");
}
-Value lhsDim0 = rewriter.create<memref::DimOp>(loc, lhs, 0);
-Value lhsDim1 = rewriter.create<memref::DimOp>(loc, lhs, 1);
-Value rhsDim0 = rewriter.create<memref::DimOp>(loc, rhs, 0);
-Value rhsDim1 = rewriter.create<memref::DimOp>(loc, rhs, 1);
+Value lhsDim0 = rewriter.create<tensor::DimOp>(loc, lhs, 0);
+Value lhsDim1 = rewriter.create<tensor::DimOp>(loc, lhs, 1);
+Value rhsDim0 = rewriter.create<tensor::DimOp>(loc, rhs, 0);
+Value rhsDim1 = rewriter.create<tensor::DimOp>(loc, rhs, 1);
Value contractingDimEqual =
rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, lhsDim1, rhsDim0);
rewriter.create<AssertOp>(
@@ -276,7 +276,7 @@ public:
op, "unimplemented: size-1 broadcasting for aten::LinearOp");
auto getDimOp = [&](Value v, int dimension) {
-return rewriter.create<memref::DimOp>(loc, v, dimension);
+return rewriter.create<tensor::DimOp>(loc, v, dimension);
};
Value inputDim0 = getDimOp(input, 0);
Value inputDim1 = getDimOp(input, 1);
@@ -539,7 +539,7 @@ struct ConvertElementwiseOp : ConversionPattern {
// undefined behavior, by doing appropriate checks against the current
// dimension size.
auto currentDimSize =
-rewriter.create<memref::DimOp>(loc, tensorOperand, size.index());
+rewriter.create<tensor::DimOp>(loc, tensorOperand, size.index());
// If the result size of this dimension has so far only hit the
// statically-known-to-be-1 case above (i.e., we have not yet assigned a
@@ -614,7 +614,7 @@ public:
if (!(0 <= dim && dim <= inputRank))
return rewriter.notifyMatchFailure(op, "statically invalid");
-SmallVector<linalg::ReassociationIndices> reassociationMap(inputRank);
+SmallVector<ReassociationIndices> reassociationMap(inputRank);
// From the perspective of the reassociation map, the situation of
// unsqueezing before or after the last dimension is symmetrical.
// Normalize it to the "before" case.


@@ -17,6 +17,8 @@ using namespace mlir;
using namespace mlir::NPCOMP;
using namespace mlir::NPCOMP::Basicpy;
+#include "npcomp/Dialect/Basicpy/IR/BasicpyOpsDialect.cpp.inc"
//===----------------------------------------------------------------------===//
// Dialect Interfaces
//===----------------------------------------------------------------------===//


@@ -18,6 +18,8 @@ using namespace mlir;
using namespace mlir::NPCOMP;
using namespace mlir::NPCOMP::Numpy;
+#include "npcomp/Dialect/Numpy/IR/NumpyOpsDialect.cpp.inc"
void NumpyDialect::initialize() {
addOperations<
#define GET_OP_LIST


@@ -13,6 +13,8 @@
using namespace mlir;
using namespace mlir::NPCOMP::refback;
+#include "npcomp/Dialect/Refback/IR/RefbackOpsDialect.cpp.inc"
//===----------------------------------------------------------------------===//
// RefbackDialect Dialect Interfaces
//===----------------------------------------------------------------------===//


@@ -14,6 +14,8 @@
using namespace mlir;
using namespace mlir::NPCOMP::refbackrt;
+#include "npcomp/Dialect/Refbackrt/IR/RefbackrtOpsDialect.cpp.inc"
void RefbackrtDialect::initialize() {
addOperations<
#define GET_OP_LIST


@@ -12,6 +12,8 @@
using namespace mlir;
using namespace mlir::NPCOMP::tcf;
+#include "npcomp/Dialect/TCF/IR/TCFOpsDialect.cpp.inc"
void TCFDialect::initialize() {
addOperations<
#define GET_OP_LIST


@@ -13,6 +13,8 @@
using namespace mlir;
using namespace mlir::NPCOMP::tcp;
+#include "npcomp/Dialect/TCP/IR/TCPOpsDialect.cpp.inc"
//===----------------------------------------------------------------------===//
// TCPDialect Dialect Interfaces
//===----------------------------------------------------------------------===//


@@ -50,7 +50,7 @@ static SmallVector<Value, 6> bypassResultShapes(Operation &op) {
builder.create<tensor::ExtractOp>(op.getLoc(), pad.upperExpansion(),
ValueRange({dimIndex}));
auto operandDim =
-builder.create<memref::DimOp>(op.getLoc(), pad.operand(), i);
+builder.create<tensor::DimOp>(op.getLoc(), pad.operand(), i);
auto totalExpansion =
builder.create<AddIOp>(op.getLoc(), lowerExpansion, upperExpansion);
auto outDim =
@@ -119,7 +119,7 @@ public:
for (int i = 0, e = inputType.getRank(); i < e; i++) {
// Calculate the relevant extents.
Value inputExtent =
-rewriter.create<memref::DimOp>(op.getLoc(), op.operand(), i);
+rewriter.create<tensor::DimOp>(op.getLoc(), op.operand(), i);
inputDimRequiresBroadcasting.push_back(
rewriter.create<CmpIOp>(op.getLoc(), CmpIPredicate::ne, inputExtent,
outputExtents[rankDiff + i]));
@@ -204,7 +204,7 @@ public:
auto offset =
rewriter.create<tensor::ExtractOp>(op.getLoc(), op.lowerExpansion(),
ValueRange({dimIndex}));
-auto size = rewriter.create<memref::DimOp>(op.getLoc(), op.operand(), i);
+auto size = rewriter.create<tensor::DimOp>(op.getLoc(), op.operand(), i);
auto stride = c1;
offsets.push_back(offset);
sizes.push_back(size);


@@ -19,6 +19,8 @@ using namespace mlir;
using namespace mlir::NPCOMP;
using namespace mlir::NPCOMP::Torch;
+#include "npcomp/Dialect/Torch/IR/TorchDialect.cpp.inc"
//===----------------------------------------------------------------------===//
// Dialect Interfaces
//===----------------------------------------------------------------------===//


@@ -5,10 +5,10 @@ func @mm(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> attr
%c0 = constant 0 : index
%c1 = constant 1 : index
%cst = constant 0.000000e+00 : f32
-%0 = memref.dim %arg0, %c0 : tensor<?x?xf32>
-%1 = memref.dim %arg0, %c1 : tensor<?x?xf32>
-%2 = memref.dim %arg1, %c0 : tensor<?x?xf32>
-%3 = memref.dim %arg1, %c1 : tensor<?x?xf32>
+%0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
+%1 = tensor.dim %arg0, %c1 : tensor<?x?xf32>
+%2 = tensor.dim %arg1, %c0 : tensor<?x?xf32>
+%3 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%4 = cmpi eq, %1, %2 : index
assert %4, "mismatching contracting dimension for aten.mm"
%5 = linalg.init_tensor [%0, %3] : tensor<?x?xf32>


@@ -6,13 +6,13 @@
// CHECK: %[[C0F32:.*]] = constant 0.000000e+00 : f32
// CHECK: %[[C0:.*]] = constant 0 : index
// CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[LHSK:.*]] = memref.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[RHSK:.*]] = memref.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[LHSK:.*]] = tensor.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[RHSK:.*]] = tensor.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[KEQUAL:.*]] = cmpi eq, %[[LHSK]], %[[RHSK]] : index
// CHECK: %[[WINESS:.*]] = shape.cstr_require %[[KEQUAL]], "mismatching contracting dimension for matmul"
// CHECK: %[[RET:.*]] = shape.assuming %[[WINESS]] -> (tensor<?x?xf32>) {
-// CHECK: %[[LHSROWS:.*]] = memref.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
-// CHECK: %[[RHSCOLS:.*]] = memref.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[LHSROWS:.*]] = tensor.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[RHSCOLS:.*]] = tensor.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
// CHECK: %[[SHAPE:.*]] = tensor.from_elements %[[LHSROWS]], %[[RHSCOLS]] : tensor<2xindex>
// CHECK: %[[INIT_TENSOR:.*]] = tcp.splatted %[[C0F32]], %[[SHAPE]] : (f32, tensor<2xindex>) -> tensor<?x?xf32>
// CHECK: %[[MATMUL:.*]] = linalg.matmul ins(%[[LHS]], %[[RHS]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[INIT_TENSOR]] : tensor<?x?xf32>) -> tensor<?x?xf32>
@@ -32,12 +32,12 @@ func @tcf_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf
// CHECK: %[[C0:.*]] = constant 0 : index
// CHECK: %[[C2:.*]] = constant 2 : index
// CHECK: %[[C3:.*]] = constant 3 : index
-// CHECK: %[[CHANNELS:.*]] = memref.dim %[[IN]], %[[C1]] : tensor<?x?x?x?xf32>
-// CHECK: %[[HEIGHT:.*]] = memref.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[WIDTH:.*]] = memref.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERCHANNELS:.*]] = memref.dim %[[FILTER]], %[[C1]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERHEIGHT:.*]] = memref.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERWIDTH:.*]] = memref.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[CHANNELS:.*]] = tensor.dim %[[IN]], %[[C1]] : tensor<?x?x?x?xf32>
+// CHECK: %[[HEIGHT:.*]] = tensor.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[WIDTH:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERCHANNELS:.*]] = tensor.dim %[[FILTER]], %[[C1]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERHEIGHT:.*]] = tensor.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERWIDTH:.*]] = tensor.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
// CHECK: %[[CMPCHANNELS:.*]] = cmpi eq, %[[CHANNELS]], %[[FILTERCHANNELS]] : index
// CHECK: %[[CMPHEIGHT:.*]] = cmpi uge, %[[HEIGHT]], %[[FILTERHEIGHT]] : index
// CHECK: %[[CMPWIDTH:.*]] = cmpi uge, %[[WIDTH]], %[[FILTERWIDTH]] : index
@@ -46,12 +46,12 @@ func @tcf_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf
// CHECK: %[[CSTRWIDTH:.*]] = shape.cstr_require %[[CMPWIDTH]], "input width must be greater than or equal to filter KW-dimension"
// CHECK: %[[WITNESS:.*]] = shape.assuming_all %[[CSTRCHANNELS]], %[[CSTRHEIGHT]], %[[CSTRWIDTH]]
// CHECK: %[[RET:.*]] = shape.assuming %[[WITNESS]] -> (tensor<?x?x?x?xf32>) {
-// CHECK: %[[BATCH:.*]] = memref.dim %[[IN]], %[[C0]] : tensor<?x?x?x?xf32>
-// CHECK: %[[HEIGHT:.*]] = memref.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[WIDTH:.*]] = memref.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[OUTCHANNELS:.*]] = memref.dim %[[FILTER]], %[[C0]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERHEIGHT:.*]] = memref.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERWIDTH:.*]] = memref.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[BATCH:.*]] = tensor.dim %[[IN]], %[[C0]] : tensor<?x?x?x?xf32>
+// CHECK: %[[HEIGHT:.*]] = tensor.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[WIDTH:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[OUTCHANNELS:.*]] = tensor.dim %[[FILTER]], %[[C0]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERHEIGHT:.*]] = tensor.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERWIDTH:.*]] = tensor.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
// CHECK: %[[FILTERHEIGHTM1:.*]] = subi %[[FILTERHEIGHT]], %[[C1]] : index
// CHECK: %[[HEIGHTV0:.*]] = subi %[[HEIGHT]], %[[FILTERHEIGHTM1]] : index
// CHECK: %[[HEIGHTV0M1:.*]] = subi %[[HEIGHTV0]], %[[C1]] : index


@@ -6,13 +6,13 @@
// CHECK: %[[LHS:.*]] = torch.to_builtin_tensor %[[LHS_VTENSOR]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
// CHECK: %[[RHS:.*]] = torch.to_builtin_tensor %[[RHS_VTENSOR]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
// CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: %[[LHS_DIM_0:.*]] = memref.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[LHS_DIM_0:.*]] = tensor.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[LHS_DIM_1:.*]] = memref.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[LHS_DIM_1:.*]] = tensor.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
// CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: %[[RHS_DIM_0:.*]] = memref.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[RHS_DIM_0:.*]] = tensor.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[RHS_DIM_1:.*]] = memref.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[RHS_DIM_1:.*]] = tensor.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
// CHECK: %[[EQ:.*]] = cmpi eq, %[[LHS_DIM_1]], %[[RHS_DIM_0]] : index
// CHECK: assert %[[EQ]], "mismatching contracting dimension for torch.aten.mm"
// CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor [%[[LHS_DIM_0]], %[[RHS_DIM_1]]] : tensor<?x?xf32>


@@ -25,11 +25,11 @@ func @elementwise$unary(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32>
// CHECK: %[[BUILTIN_ARG0:.*]] = torch.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
// CHECK: %[[BUILTIN_ARG1:.*]] = torch.to_builtin_tensor %[[ARG1]] : !torch.vtensor<[?],f32> -> tensor<?xf32>
// CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: %[[ARG0_DIM0:.*]] = memref.dim %[[BUILTIN_ARG0]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[ARG0_DIM0:.*]] = tensor.dim %[[BUILTIN_ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[ARG0_DIM1:.*]] = memref.dim %[[BUILTIN_ARG0]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[ARG0_DIM1:.*]] = tensor.dim %[[BUILTIN_ARG0]], %[[C1]] : tensor<?x?xf32>
// CHECK: %[[C0_2:.*]] = constant 0 : index
-// CHECK: %[[ARG1_DIM0:.*]] = memref.dim %[[BUILTIN_ARG1]], %[[C0_2]] : tensor<?xf32>
+// CHECK: %[[ARG1_DIM0:.*]] = tensor.dim %[[BUILTIN_ARG1]], %[[C0_2]] : tensor<?xf32>
// CHECK: %[[LEGAL_SIZES:.*]] = cmpi eq, %[[ARG0_DIM1]], %[[ARG1_DIM0]] : index
// CHECK: assert %[[LEGAL_SIZES]], "mismatched size for broadcast"
// CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor [%[[ARG0_DIM0]], %[[ARG0_DIM1]]] : tensor<?x?xf32>


@@ -38,7 +38,7 @@ func @tcp_splatted(%arg0: f32, %arg1: tensor<?xindex>) -> tensor<?x?xf32> {
// CHECK: %[[LOWER_EXTENT_D1:.*]] = tensor.extract %[[LOWER_EXPANSION]][%[[C0]]] : tensor<?xindex>
// CHECK: %[[UPPER_EXTENT_D1:.*]] = tensor.extract %[[UPPER_EXPANSION]][%[[C0]]] : tensor<?xindex>
// CHECK: %[[C0_0:.*]] = constant 0 : index
-// CHECK: %[[D1:.*]] = memref.dim %[[TENSOR]], %[[C0_0]] : tensor<?xf32>
+// CHECK: %[[D1:.*]] = tensor.dim %[[TENSOR]], %[[C0_0]] : tensor<?xf32>
// CHECK: %[[D1_EXPANSION:.*]] = addi %[[LOWER_EXTENT_D1]], %[[UPPER_EXTENT_D1]] : index
// CHECK: %[[D1_OUT:.*]] = addi %[[D1_EXPANSION]], %[[D1]] : index
// CHECK: %[[D1_OUT_TENSOR:.*]] = tensor.from_elements %[[D1_OUT]] : tensor<1xindex>
@@ -47,7 +47,7 @@ func @tcp_splatted(%arg0: f32, %arg1: tensor<?xindex>) -> tensor<?x?xf32> {
// CHECK: %[[C0_1:.*]] = constant 0 : index
// CHECK: %[[LOWER_EXTENT_D1_1:.*]] = tensor.extract %[[LOWER_EXPANSION]][%[[C0_1]]] : tensor<?xindex>
// CHECK: %[[C0_2:.*]] = constant 0 : index
-// CHECK: %[[D1_1:.*]] = memref.dim %[[TENSOR]], %[[C0_2]] : tensor<?xf32>
+// CHECK: %[[D1_1:.*]] = tensor.dim %[[TENSOR]], %[[C0_2]] : tensor<?xf32>
// CHECK: linalg.fill(%[[FILL_VAL]], %[[D1_OUT_MREF]]) : f32, memref<?xf32>
// CHECK: %[[SUBVIEW:.*]] = memref.subview %[[D1_OUT_MREF]][%[[LOWER_EXTENT_D1_1]]] [%[[D1_1]]] [%[[C1]]] : memref<?xf32> to memref<?xf32, #map>
// CHECK: linalg.copy(%0, %[[SUBVIEW]]) : memref<?xf32>, memref<?xf32, #map>


@@ -210,6 +210,12 @@ struct Options {
} // namespace
int main(int argc, char **argv) {
+mlir::registerMLIRContextCLOptions();
+mlir::registerAsmPrinterCLOptions();
+mlir::registerPassManagerCLOptions();
+Options options;
+llvm::cl::ParseCommandLineOptions(argc, argv, "npcomp compile+run utility\n");
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::registerAllPasses();
@@ -222,11 +228,6 @@ int main(int argc, char **argv) {
llvm::InitLLVM y(argc, argv);
npcompInitializeLLVMCodegen();
-mlir::registerAsmPrinterCLOptions();
-mlir::registerPassManagerCLOptions();
-Options options;
-llvm::cl::ParseCommandLineOptions(argc, argv, "npcomp compile+run utility\n");
SmallVector<StringRef, 6> sharedLibs(options.sharedLibs.begin(),
options.sharedLibs.end());
SmallVector<StringRef, 6> argValues(options.argValues.begin(),