From 83b5b5456d3f4125e64e6822713bc36ab73d8273 Mon Sep 17 00:00:00 2001
From: Sean Silva
Date: Wed, 7 Jul 2021 11:55:31 -0700
Subject: [PATCH] Bump llvm-project to da289a174fc6617c7be37be2947480510fd4f02a

- Build adjustments for `.cpp.inc` dialect files.
- Renaming of `memref.dim` to `tensor.dim` for the tensor case (see the
  sketch below).

Minor changes:
- Renaming of `mlir::linalg::ReassociationIndices` to
  `mlir::ReassociationIndices`.
- Adjust command line option parsing in npcomp-run-mlir.
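For illustration, the rename is a one-for-one op substitution on tensor
operands; a minimal before/after sketch (the value names here are
hypothetical, not taken from the patch):

    // Before this bump, memref.dim also served tensor operands:
    %d0 = memref.dim %t, %c0 : tensor<?x?xf32>
    // After the bump, the tensor case uses the new tensor.dim op:
    %d0 = tensor.dim %t, %c0 : tensor<?x?xf32>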
---
 external/llvm-project                         |  2 +-
 .../npcomp/Dialect/Basicpy/IR/CMakeLists.txt  |  1 +
 .../npcomp/Dialect/Torch/IR/CMakeLists.txt    |  1 +
 lib/Backend/Common/CMakeLists.txt             |  2 +-
 lib/Backend/Common/VerifyBackendContract.cpp  |  4 +--
 lib/Conversion/TCFToLinalg/CMakeLists.txt     |  2 +-
 lib/Conversion/TCFToLinalg/TCFToLinalg.cpp    | 35 +++++++++----------
 .../TorchToLinalg/TorchToLinalg.cpp           | 20 +++++------
 lib/Dialect/Basicpy/IR/BasicpyDialect.cpp     |  2 ++
 lib/Dialect/Numpy/IR/NumpyDialect.cpp         |  2 ++
 lib/Dialect/Refback/IR/RefbackDialect.cpp     |  2 ++
 lib/Dialect/Refbackrt/IR/RefbackrtDialect.cpp |  2 ++
 lib/Dialect/TCF/IR/TCFDialect.cpp             |  2 ++
 lib/Dialect/TCP/IR/TCPDialect.cpp             |  2 ++
 lib/Dialect/TCP/Transforms/Bufferize.cpp      |  6 ++--
 lib/Dialect/Torch/IR/TorchDialect.cpp         |  2 ++
 .../Common/verify-backend-contract.mlir       |  8 ++---
 test/Conversion/TCFToLinalg/basic.mlir        | 32 ++++++++---------
 test/Conversion/TorchToLinalg/basic.mlir      |  8 ++---
 .../Conversion/TorchToLinalg/elementwise.mlir |  6 ++--
 test/Dialect/TCP/bufferize.mlir               |  4 +--
 tools/npcomp-run-mlir/npcomp-run-mlir.cpp     | 11 +++---
 22 files changed, 86 insertions(+), 70 deletions(-)

diff --git a/external/llvm-project b/external/llvm-project
index a37cf1783..da289a174 160000
--- a/external/llvm-project
+++ b/external/llvm-project
@@ -1 +1 @@
-Subproject commit a37cf17834d39411ed1d669098b428f8374c5b45
+Subproject commit da289a174fc6617c7be37be2947480510fd4f02a
diff --git a/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt b/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt
index fd3dec678..08e54a253 100644
--- a/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt
+++ b/include/npcomp/Dialect/Basicpy/IR/CMakeLists.txt
@@ -5,6 +5,7 @@ set(LLVM_TARGET_DEFINITIONS ${dialect}.td)
 mlir_tablegen(${dialect}.h.inc -gen-op-decls)
 mlir_tablegen(${dialect}.cpp.inc -gen-op-defs)
 mlir_tablegen(${dialect}Dialect.h.inc -gen-dialect-decls -dialect=${dialect_namespace})
+mlir_tablegen(${dialect}Dialect.cpp.inc -gen-dialect-defs -dialect=${dialect_namespace})
 mlir_tablegen(${dialect}Enums.h.inc -gen-enum-decls)
 mlir_tablegen(${dialect}Enums.cpp.inc -gen-enum-defs)
 add_public_tablegen_target(MLIR${dialect}IncGen)
diff --git a/include/npcomp/Dialect/Torch/IR/CMakeLists.txt b/include/npcomp/Dialect/Torch/IR/CMakeLists.txt
index 754295852..3302a75f8 100644
--- a/include/npcomp/Dialect/Torch/IR/CMakeLists.txt
+++ b/include/npcomp/Dialect/Torch/IR/CMakeLists.txt
@@ -2,6 +2,7 @@ set(LLVM_TARGET_DEFINITIONS TorchOps.td)
 mlir_tablegen(TorchOps.h.inc -gen-op-decls)
 mlir_tablegen(TorchOps.cpp.inc -gen-op-defs)
 mlir_tablegen(TorchDialect.h.inc -gen-dialect-decls -dialect=torch)
+mlir_tablegen(TorchDialect.cpp.inc -gen-dialect-defs -dialect=torch)
 add_public_tablegen_target(MLIRTorchOpsIncGen)
 add_dependencies(mlir-headers MLIRTorchOpsIncGen)
diff --git a/lib/Backend/Common/CMakeLists.txt b/lib/Backend/Common/CMakeLists.txt
index 0937aa2f7..c4bce4cc7 100644
--- a/lib/Backend/Common/CMakeLists.txt
+++ b/lib/Backend/Common/CMakeLists.txt
@@ -14,7 +14,7 @@ add_npcomp_library(NPCOMPCommonBackend
   LINK_LIBS PUBLIC
     MLIRIR
     MLIRLinalg
-    MLIRMemRef
+    MLIRTensor
     MLIRStandard
     MLIRMath
 )
diff --git a/lib/Backend/Common/VerifyBackendContract.cpp b/lib/Backend/Common/VerifyBackendContract.cpp
index b8dbb8582..895898beb 100644
--- a/lib/Backend/Common/VerifyBackendContract.cpp
+++ b/lib/Backend/Common/VerifyBackendContract.cpp
@@ -9,7 +9,7 @@
 #include "PassDetail.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/IR/OpDefinition.h"
 #include "mlir/Transforms/DialectConversion.h"
 #include "npcomp/Backend/Common/Passes.h"
@@ -60,7 +60,7 @@ class VerifyBackendContractPass
     target.addDynamicallyLegalDialect<StandardOpsDialect>(opHasLegalTypes);
     target.addDynamicallyLegalDialect<math::MathDialect>(opHasLegalTypes);
     // DimOp is used to query tensor sizes.
-    target.addDynamicallyLegalOp<memref::DimOp>(opHasLegalTypes);
+    target.addDynamicallyLegalOp<tensor::DimOp>(opHasLegalTypes);
     // AssertOp is used to terminate the program for error guards.
     target.addLegalOp<AssertOp>();
diff --git a/lib/Conversion/TCFToLinalg/CMakeLists.txt b/lib/Conversion/TCFToLinalg/CMakeLists.txt
index 65e40c5c7..b29b995dc 100644
--- a/lib/Conversion/TCFToLinalg/CMakeLists.txt
+++ b/lib/Conversion/TCFToLinalg/CMakeLists.txt
@@ -15,6 +15,6 @@ add_npcomp_conversion_library(NPCOMPTCFToLinalg
   MLIRPass
   MLIRTransforms
   MLIRShape
-  MLIRMemRef
+  MLIRTensor
   NPCOMPTCFDialect
 )
diff --git a/lib/Conversion/TCFToLinalg/TCFToLinalg.cpp b/lib/Conversion/TCFToLinalg/TCFToLinalg.cpp
index 4e69df379..b43b0564a 100644
--- a/lib/Conversion/TCFToLinalg/TCFToLinalg.cpp
+++ b/lib/Conversion/TCFToLinalg/TCFToLinalg.cpp
@@ -14,8 +14,7 @@
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Traits.h"
-// TODO: Remove when memref.dim is split into tensor.dim for the tensor case.
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Transforms/DialectConversion.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 #include "npcomp/Dialect/TCF/IR/TCFOps.h"
@@ -29,8 +28,8 @@ static SmallVector<Value, 6> bypassResultShapes(Operation *op,
                                                 OpBuilder &builder) {
   if (auto matmul = dyn_cast<tcf::MatmulOp>(op)) {
-    auto lhsRows = builder.create<memref::DimOp>(op->getLoc(), matmul.lhs(), 0);
-    auto rhsCols = builder.create<memref::DimOp>(op->getLoc(), matmul.rhs(), 1);
+    auto lhsRows = builder.create<tensor::DimOp>(op->getLoc(), matmul.lhs(), 0);
+    auto rhsCols = builder.create<tensor::DimOp>(op->getLoc(), matmul.rhs(), 1);
     auto shape = builder.create<tensor::FromElementsOp>(
         op->getLoc(), ValueRange({lhsRows, rhsCols}));
     return {shape};
@@ -52,17 +51,17 @@ static SmallVector<Value, 6> bypassResultShapes(Operation *op,
     auto paddingHeight = padding;
     auto paddingWidth = padding;
     auto batch =
-        builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.in(), 0);
+        builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.in(), 0);
     auto height =
-        builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.in(), 2);
+        builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.in(), 2);
     auto width =
-        builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.in(), 3);
+        builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.in(), 3);
     auto filterOutChannels =
-        builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.filter(), 0);
+        builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.filter(), 0);
     auto filterHeight =
-        builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.filter(), 2);
+        builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.filter(), 2);
     auto filterWidth =
-        builder.create<memref::DimOp>(op->getLoc(), conv2dNCHW.filter(), 3);
+        builder.create<tensor::DimOp>(op->getLoc(), conv2dNCHW.filter(), 3);
     // Output height
     auto twicePaddingHeight = builder.create<MulIOp>(op->getLoc(), paddingHeight, cI2);
     auto heightPlusTwicePadding = builder.create<AddIOp>(op->getLoc(), height, twicePaddingHeight);
@@ -99,8 +98,8 @@ public:
   LogicalResult matchAndRewrite(tcf::MatmulOp op,
                                 PatternRewriter &rewriter) const override {
     // Create the constraints, and the assuming region.
-    Value lhsK = rewriter.create<memref::DimOp>(op.getLoc(), op.lhs(), 1);
-    Value rhsK = rewriter.create<memref::DimOp>(op.getLoc(), op.rhs(), 0);
+    Value lhsK = rewriter.create<tensor::DimOp>(op.getLoc(), op.lhs(), 1);
+    Value rhsK = rewriter.create<tensor::DimOp>(op.getLoc(), op.rhs(), 0);
     Value matchingK =
         rewriter.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq, lhsK, rhsK);
     Value witness = rewriter.create<shape::CstrRequireOp>(
@@ -138,15 +137,15 @@ public:
   LogicalResult matchAndRewrite(tcf::ConvNCHWOp op,
                                 PatternRewriter &rewriter) const override {
     // Create the constraints, and the assuming region.
-    Value inputCin = rewriter.create<memref::DimOp>(op.getLoc(), op.in(), 1);
-    Value inputH = rewriter.create<memref::DimOp>(op.getLoc(), op.in(), 2);
-    Value inputW = rewriter.create<memref::DimOp>(op.getLoc(), op.in(), 3);
+    Value inputCin = rewriter.create<tensor::DimOp>(op.getLoc(), op.in(), 1);
+    Value inputH = rewriter.create<tensor::DimOp>(op.getLoc(), op.in(), 2);
+    Value inputW = rewriter.create<tensor::DimOp>(op.getLoc(), op.in(), 3);
     Value filterCin =
-        rewriter.create<memref::DimOp>(op.getLoc(), op.filter(), 1);
+        rewriter.create<tensor::DimOp>(op.getLoc(), op.filter(), 1);
     Value filterKH =
-        rewriter.create<memref::DimOp>(op.getLoc(), op.filter(), 2);
+        rewriter.create<tensor::DimOp>(op.getLoc(), op.filter(), 2);
     Value filterKW =
-        rewriter.create<memref::DimOp>(op.getLoc(), op.filter(), 3);
+        rewriter.create<tensor::DimOp>(op.getLoc(), op.filter(), 3);
     Value matchingCin = rewriter.create<CmpIOp>(
         op.getLoc(), CmpIPredicate::eq, inputCin, filterCin);
     Value validFilterH =
diff --git a/lib/Conversion/TorchToLinalg/TorchToLinalg.cpp b/lib/Conversion/TorchToLinalg/TorchToLinalg.cpp
index 5a6c548f7..98d605593 100644
--- a/lib/Conversion/TorchToLinalg/TorchToLinalg.cpp
+++ b/lib/Conversion/TorchToLinalg/TorchToLinalg.cpp
@@ -11,7 +11,7 @@
 #include "../PassDetail.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h" // TODO: For `memref.dim`.
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Traits.h"
 #include "mlir/IR/Matchers.h"
 #include "mlir/Transforms/DialectConversion.h"
@@ -109,9 +109,9 @@ public:
           rewriter.getStringAttr("training is not supported for now"));

     // num_features – C from an expected input of size (N,C,D,H,W ...)
-    Value numFeatures = rewriter.create<memref::DimOp>(loc, input, 1);
+    Value numFeatures = rewriter.create<tensor::DimOp>(loc, input, 1);
     auto contractingDim0EqualsNumFeatures = [&](Value v) {
-      auto dim0 = rewriter.create<memref::DimOp>(loc, v, 0);
+      auto dim0 = rewriter.create<tensor::DimOp>(loc, v, 0);
       auto dim0Equal =
           rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, numFeatures, dim0);
       rewriter.create<AssertOp>(
@@ -195,10 +195,10 @@ public:
           op, "expected both operands to aten.mm to be rank 2");
     }

-    Value lhsDim0 = rewriter.create<memref::DimOp>(loc, lhs, 0);
-    Value lhsDim1 = rewriter.create<memref::DimOp>(loc, lhs, 1);
-    Value rhsDim0 = rewriter.create<memref::DimOp>(loc, rhs, 0);
-    Value rhsDim1 = rewriter.create<memref::DimOp>(loc, rhs, 1);
+    Value lhsDim0 = rewriter.create<tensor::DimOp>(loc, lhs, 0);
+    Value lhsDim1 = rewriter.create<tensor::DimOp>(loc, lhs, 1);
+    Value rhsDim0 = rewriter.create<tensor::DimOp>(loc, rhs, 0);
+    Value rhsDim1 = rewriter.create<tensor::DimOp>(loc, rhs, 1);
     Value contractingDimEqual =
         rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, lhsDim1, rhsDim0);
     rewriter.create<AssertOp>(
@@ -276,7 +276,7 @@ public:
           op, "unimplemented: size-1 broadcasting for aten::LinearOp");

     auto getDimOp = [&](Value v, int dimension) {
-      return rewriter.create<memref::DimOp>(loc, v, dimension);
+      return rewriter.create<tensor::DimOp>(loc, v, dimension);
     };
     Value inputDim0 = getDimOp(input, 0);
     Value inputDim1 = getDimOp(input, 1);
@@ -539,7 +539,7 @@ struct ConvertElementwiseOp : ConversionPattern {
         // undefined behavior, by doing appropriate checks against the current
         // dimension size.
         auto currentDimSize =
-            rewriter.create<memref::DimOp>(loc, tensorOperand, size.index());
+            rewriter.create<tensor::DimOp>(loc, tensorOperand, size.index());

         // If the result size of this dimension has so far only hit the
         // statically-known-to-be-1 case above (i.e., we have not yet assigned a
@@ -614,7 +614,7 @@ public:
     if (!(0 <= dim && dim <= inputRank))
       return rewriter.notifyMatchFailure(op, "statically invalid");

-    SmallVector<linalg::ReassociationIndices> reassociationMap(inputRank);
+    SmallVector<ReassociationIndices> reassociationMap(inputRank);
     // From the perspective of the reassociation map, the situation of
     // unsqueezing before or after the last dimension is symmetrical.
     // Normalize it to the "before" case.
diff --git a/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp b/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp
index aba6d518c..227a9e22e 100644
--- a/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp
+++ b/lib/Dialect/Basicpy/IR/BasicpyDialect.cpp
@@ -17,6 +17,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP;
 using namespace mlir::NPCOMP::Basicpy;

+#include "npcomp/Dialect/Basicpy/IR/BasicpyOpsDialect.cpp.inc"
+
 //===----------------------------------------------------------------------===//
 // Dialect Interfaces
 //===----------------------------------------------------------------------===//
diff --git a/lib/Dialect/Numpy/IR/NumpyDialect.cpp b/lib/Dialect/Numpy/IR/NumpyDialect.cpp
index 467ec138c..1fae40499 100644
--- a/lib/Dialect/Numpy/IR/NumpyDialect.cpp
+++ b/lib/Dialect/Numpy/IR/NumpyDialect.cpp
@@ -18,6 +18,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP;
 using namespace mlir::NPCOMP::Numpy;

+#include "npcomp/Dialect/Numpy/IR/NumpyOpsDialect.cpp.inc"
+
 void NumpyDialect::initialize() {
   addOperations<
 #define GET_OP_LIST
diff --git a/lib/Dialect/Refback/IR/RefbackDialect.cpp b/lib/Dialect/Refback/IR/RefbackDialect.cpp
index 8444278da..56c1a9dcf 100644
--- a/lib/Dialect/Refback/IR/RefbackDialect.cpp
+++ b/lib/Dialect/Refback/IR/RefbackDialect.cpp
@@ -13,6 +13,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP::refback;

+#include "npcomp/Dialect/Refback/IR/RefbackOpsDialect.cpp.inc"
+
 //===----------------------------------------------------------------------===//
 // RefbackDialect Dialect Interfaces
 //===----------------------------------------------------------------------===//
diff --git a/lib/Dialect/Refbackrt/IR/RefbackrtDialect.cpp b/lib/Dialect/Refbackrt/IR/RefbackrtDialect.cpp
index 7d534da46..9158a566c 100644
--- a/lib/Dialect/Refbackrt/IR/RefbackrtDialect.cpp
+++ b/lib/Dialect/Refbackrt/IR/RefbackrtDialect.cpp
@@ -14,6 +14,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP::refbackrt;

+#include "npcomp/Dialect/Refbackrt/IR/RefbackrtOpsDialect.cpp.inc"
+
 void RefbackrtDialect::initialize() {
   addOperations<
 #define GET_OP_LIST
diff --git a/lib/Dialect/TCF/IR/TCFDialect.cpp b/lib/Dialect/TCF/IR/TCFDialect.cpp
index 572a3802b..cb15d91e3 100644
--- a/lib/Dialect/TCF/IR/TCFDialect.cpp
+++ b/lib/Dialect/TCF/IR/TCFDialect.cpp
@@ -12,6 +12,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP::tcf;

+#include "npcomp/Dialect/TCF/IR/TCFOpsDialect.cpp.inc"
+
 void TCFDialect::initialize() {
   addOperations<
 #define GET_OP_LIST
diff --git a/lib/Dialect/TCP/IR/TCPDialect.cpp b/lib/Dialect/TCP/IR/TCPDialect.cpp
index 29f657c57..57dbfa749 100644
--- a/lib/Dialect/TCP/IR/TCPDialect.cpp
+++ b/lib/Dialect/TCP/IR/TCPDialect.cpp
@@ -13,6 +13,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP::tcp;

+#include "npcomp/Dialect/TCP/IR/TCPOpsDialect.cpp.inc"
+
 //===----------------------------------------------------------------------===//
 // TCPDialect Dialect Interfaces
 //===----------------------------------------------------------------------===//
diff --git a/lib/Dialect/TCP/Transforms/Bufferize.cpp b/lib/Dialect/TCP/Transforms/Bufferize.cpp
index 85badc761..a1949c240 100644
--- a/lib/Dialect/TCP/Transforms/Bufferize.cpp
+++ b/lib/Dialect/TCP/Transforms/Bufferize.cpp
@@ -50,7 +50,7 @@ static SmallVector<Value, 6> bypassResultShapes(Operation &op) {
         builder.create<tensor::ExtractOp>(op.getLoc(), pad.upperExpansion(),
                                           ValueRange({dimIndex}));
     auto operandDim =
-        builder.create<memref::DimOp>(op.getLoc(), pad.operand(), i);
+        builder.create<tensor::DimOp>(op.getLoc(), pad.operand(), i);
     auto totalExpansion =
         builder.create<AddIOp>(op.getLoc(), lowerExpansion, upperExpansion);
     auto outDim =
@@ -119,7 +119,7 @@ public:
     for (int i = 0, e = inputType.getRank(); i < e; i++) {
       // Calculate the relevant extents.
       Value inputExtent =
-          rewriter.create<memref::DimOp>(op.getLoc(), op.operand(), i);
+          rewriter.create<tensor::DimOp>(op.getLoc(), op.operand(), i);
       inputDimRequiresBroadcasting.push_back(
           rewriter.create<CmpIOp>(op.getLoc(), CmpIPredicate::ne, inputExtent,
                                   outputExtents[rankDiff + i]));
@@ -204,7 +204,7 @@ public:
     auto offset =
         rewriter.create<tensor::ExtractOp>(op.getLoc(), op.lowerExpansion(),
                                            ValueRange({dimIndex}));
-    auto size = rewriter.create<memref::DimOp>(op.getLoc(), op.operand(), i);
+    auto size = rewriter.create<tensor::DimOp>(op.getLoc(), op.operand(), i);
     auto stride = c1;
     offsets.push_back(offset);
     sizes.push_back(size);
diff --git a/lib/Dialect/Torch/IR/TorchDialect.cpp b/lib/Dialect/Torch/IR/TorchDialect.cpp
index 82ea3f104..40522dbbd 100644
--- a/lib/Dialect/Torch/IR/TorchDialect.cpp
+++ b/lib/Dialect/Torch/IR/TorchDialect.cpp
@@ -19,6 +19,8 @@
 using namespace mlir;
 using namespace mlir::NPCOMP;
 using namespace mlir::NPCOMP::Torch;

+#include "npcomp/Dialect/Torch/IR/TorchDialect.cpp.inc"
+
 //===----------------------------------------------------------------------===//
 // Dialect Interfaces
 //===----------------------------------------------------------------------===//
diff --git a/test/Backend/Common/verify-backend-contract.mlir b/test/Backend/Common/verify-backend-contract.mlir
index a1ca8bd98..5f9a721dc 100644
--- a/test/Backend/Common/verify-backend-contract.mlir
+++ b/test/Backend/Common/verify-backend-contract.mlir
@@ -5,10 +5,10 @@ func @mm(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> attr
   %c0 = constant 0 : index
   %c1 = constant 1 : index
   %cst = constant 0.000000e+00 : f32
-  %0 = memref.dim %arg0, %c0 : tensor<?x?xf32>
-  %1 = memref.dim %arg0, %c1 : tensor<?x?xf32>
-  %2 = memref.dim %arg1, %c0 : tensor<?x?xf32>
-  %3 = memref.dim %arg1, %c1 : tensor<?x?xf32>
+  %0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
+  %1 = tensor.dim %arg0, %c1 : tensor<?x?xf32>
+  %2 = tensor.dim %arg1, %c0 : tensor<?x?xf32>
+  %3 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
   %4 = cmpi eq, %1, %2 : index
   assert %4, "mismatching contracting dimension for aten.mm"
   %5 = linalg.init_tensor [%0, %3] : tensor<?x?xf32>
diff --git a/test/Conversion/TCFToLinalg/basic.mlir b/test/Conversion/TCFToLinalg/basic.mlir
index 024a834ee..2dccd3630 100644
--- a/test/Conversion/TCFToLinalg/basic.mlir
+++ b/test/Conversion/TCFToLinalg/basic.mlir
@@ -6,13 +6,13 @@
 // CHECK: %[[C0F32:.*]] = constant 0.000000e+00 : f32
 // CHECK: %[[C0:.*]] = constant 0 : index
 // CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[LHSK:.*]] = memref.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[RHSK:.*]] = memref.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[LHSK:.*]] = tensor.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[RHSK:.*]] = tensor.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
 // CHECK: %[[KEQUAL:.*]] = cmpi eq, %[[LHSK]], %[[RHSK]] : index
 // CHECK: %[[WINESS:.*]] = shape.cstr_require %[[KEQUAL]], "mismatching contracting dimension for matmul"
 // CHECK: %[[RET:.*]] = shape.assuming %[[WINESS]] -> (tensor<?x?xf32>) {
-// CHECK: %[[LHSROWS:.*]] = memref.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
-// CHECK: %[[RHSCOLS:.*]] = memref.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[LHSROWS:.*]] = tensor.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[RHSCOLS:.*]] = tensor.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
 // CHECK: %[[SHAPE:.*]] = tensor.from_elements %[[LHSROWS]], %[[RHSCOLS]] : tensor<2xindex>
 // CHECK: %[[INIT_TENSOR:.*]] = tcp.splatted %[[C0F32]], %[[SHAPE]] : (f32, tensor<2xindex>) -> tensor<?x?xf32>
 // CHECK: %[[MATMUL:.*]] = linalg.matmul ins(%[[LHS]], %[[RHS]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[INIT_TENSOR]] : tensor<?x?xf32>) -> tensor<?x?xf32>
@@ -32,12 +32,12 @@ func @tcf_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32>
-// CHECK: %[[CHANNELS:.*]] = memref.dim %[[IN]], %[[C1]] : tensor<?x?x?x?xf32>
-// CHECK: %[[HEIGHT:.*]] = memref.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[WIDTH:.*]] = memref.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERCHANNELS:.*]] = memref.dim %[[FILTER]], %[[C1]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERHEIGHT:.*]] = memref.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERWIDTH:.*]] = memref.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[CHANNELS:.*]] = tensor.dim %[[IN]], %[[C1]] : tensor<?x?x?x?xf32>
+// CHECK: %[[HEIGHT:.*]] = tensor.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[WIDTH:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERCHANNELS:.*]] = tensor.dim %[[FILTER]], %[[C1]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERHEIGHT:.*]] = tensor.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERWIDTH:.*]] = tensor.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
 // CHECK: %[[CMPCHANNELS:.*]] = cmpi eq, %[[CHANNELS]], %[[FILTERCHANNELS]] : index
 // CHECK: %[[CMPHEIGHT:.*]] = cmpi uge, %[[HEIGHT]], %[[FILTERHEIGHT]] : index
 // CHECK: %[[CMPWIDTH:.*]] = cmpi uge, %[[WIDTH]], %[[FILTERWIDTH]] : index
@@ -46,12 +46,12 @@ func @tcf_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32>
 // CHECK: %[[RET:.*]] = shape.assuming %[[WINESS]] -> (tensor<?x?x?x?xf32>) {
-// CHECK: %[[BATCH:.*]] = memref.dim %[[IN]], %[[C0]] : tensor<?x?x?x?xf32>
-// CHECK: %[[HEIGHT:.*]] = memref.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[WIDTH:.*]] = memref.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[OUTCHANNELS:.*]] = memref.dim %[[FILTER]], %[[C0]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERHEIGHT:.*]] = memref.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[FILTERWIDTH:.*]] = memref.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[BATCH:.*]] = tensor.dim %[[IN]], %[[C0]] : tensor<?x?x?x?xf32>
+// CHECK: %[[HEIGHT:.*]] = tensor.dim %[[IN]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[WIDTH:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[OUTCHANNELS:.*]] = tensor.dim %[[FILTER]], %[[C0]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERHEIGHT:.*]] = tensor.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[FILTERWIDTH:.*]] = tensor.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
 // CHECK: %[[FILTERHEIGHTM1:.*]] = subi %[[FILTERHEIGHT]], %[[C1]] : index
 // CHECK: %[[HEIGHTV0:.*]] = subi %[[HEIGHT]], %[[FILTERHEIGHTM1]] : index
 // CHECK: %[[HEIGHTV0M1:.*]] = subi %[[HEIGHTV0]], %[[C1]] : index
diff --git a/test/Conversion/TorchToLinalg/basic.mlir b/test/Conversion/TorchToLinalg/basic.mlir
index 158ec9d5f..c99ce2b96 100644
--- a/test/Conversion/TorchToLinalg/basic.mlir
+++ b/test/Conversion/TorchToLinalg/basic.mlir
@@ -6,13 +6,13 @@
 // CHECK: %[[LHS:.*]] = torch.to_builtin_tensor %[[LHS_VTENSOR]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
 // CHECK: %[[RHS:.*]] = torch.to_builtin_tensor %[[RHS_VTENSOR]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
 // CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: %[[LHS_DIM_0:.*]] = memref.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[LHS_DIM_0:.*]] = tensor.dim %[[LHS]], %[[C0]] : tensor<?x?xf32>
 // CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[LHS_DIM_1:.*]] = memref.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[LHS_DIM_1:.*]] = tensor.dim %[[LHS]], %[[C1]] : tensor<?x?xf32>
 // CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: %[[RHS_DIM_0:.*]] = memref.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[RHS_DIM_0:.*]] = tensor.dim %[[RHS]], %[[C0]] : tensor<?x?xf32>
 // CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[RHS_DIM_1:.*]] = memref.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[RHS_DIM_1:.*]] = tensor.dim %[[RHS]], %[[C1]] : tensor<?x?xf32>
 // CHECK: %[[EQ:.*]] = cmpi eq, %[[LHS_DIM_1]], %[[RHS_DIM_0]] : index
 // CHECK: assert %[[EQ]], "mismatching contracting dimension for torch.aten.mm"
 // CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor [%[[LHS_DIM_0]], %[[RHS_DIM_1]]] : tensor<?x?xf32>
diff --git a/test/Conversion/TorchToLinalg/elementwise.mlir b/test/Conversion/TorchToLinalg/elementwise.mlir
index ba626f93e..4f34bede4 100644
--- a/test/Conversion/TorchToLinalg/elementwise.mlir
+++ b/test/Conversion/TorchToLinalg/elementwise.mlir
@@ -25,11 +25,11 @@ func @elementwise$unary(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32>
 // CHECK: %[[BUILTIN_ARG0:.*]] = torch.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
 // CHECK: %[[BUILTIN_ARG1:.*]] = torch.to_builtin_tensor %[[ARG1]] : !torch.vtensor<[?],f32> -> tensor<?xf32>
 // CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: %[[ARG0_DIM0:.*]] = memref.dim %[[BUILTIN_ARG0]], %[[C0]] : tensor<?x?xf32>
+// CHECK: %[[ARG0_DIM0:.*]] = tensor.dim %[[BUILTIN_ARG0]], %[[C0]] : tensor<?x?xf32>
 // CHECK: %[[C1:.*]] = constant 1 : index
-// CHECK: %[[ARG0_DIM1:.*]] = memref.dim %[[BUILTIN_ARG0]], %[[C1]] : tensor<?x?xf32>
+// CHECK: %[[ARG0_DIM1:.*]] = tensor.dim %[[BUILTIN_ARG0]], %[[C1]] : tensor<?x?xf32>
 // CHECK: %[[C0_2:.*]] = constant 0 : index
-// CHECK: %[[ARG1_DIM0:.*]] = memref.dim %[[BUILTIN_ARG1]], %[[C0_2]] : tensor<?xf32>
+// CHECK: %[[ARG1_DIM0:.*]] = tensor.dim %[[BUILTIN_ARG1]], %[[C0_2]] : tensor<?xf32>
 // CHECK: %[[LEGAL_SIZES:.*]] = cmpi eq, %[[ARG0_DIM1]], %[[ARG1_DIM0]] : index
 // CHECK: assert %[[LEGAL_SIZES]], "mismatched size for broadcast"
 // CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor [%[[ARG0_DIM0]], %[[ARG0_DIM1]]] : tensor<?x?xf32>
diff --git a/test/Dialect/TCP/bufferize.mlir b/test/Dialect/TCP/bufferize.mlir
index c5ec67996..2a652b0c4 100644
--- a/test/Dialect/TCP/bufferize.mlir
+++ b/test/Dialect/TCP/bufferize.mlir
@@ -38,7 +38,7 @@ func @tcp_splatted(%arg0: f32, %arg1: tensor<?xf32>) -> tensor<?xf32> {
 // CHECK: %[[LOWER_EXTENT_D1:.*]] = tensor.extract %[[LOWER_EXPANSION]][%[[C0]]] : tensor<?xindex>
 // CHECK: %[[UPPER_EXTENT_D1:.*]] = tensor.extract %[[UPPER_EXPANSION]][%[[C0]]] : tensor<?xindex>
 // CHECK: %[[C0_0:.*]] = constant 0 : index
-// CHECK: %[[D1:.*]] = memref.dim %[[TENSOR]], %[[C0_0]] : tensor<?xf32>
+// CHECK: %[[D1:.*]] = tensor.dim %[[TENSOR]], %[[C0_0]] : tensor<?xf32>
 // CHECK: %[[D1_EXPANSION:.*]] = addi %[[LOWER_EXTENT_D1]], %[[UPPER_EXTENT_D1]] : index
 // CHECK: %[[D1_OUT:.*]] = addi %[[D1_EXPANSION]], %[[D1]] : index
 // CHECK: %[[D1_OUT_TENSOR:.*]] = tensor.from_elements %[[D1_OUT]] : tensor<1xindex>
@@ -47,7 +47,7 @@ func @tcp_splatted(%arg0: f32, %arg1: tensor<?xf32>) -> tensor<?xf32> {
 // CHECK: %[[C0_1:.*]] = constant 0 : index
 // CHECK: %[[LOWER_EXTENT_D1_1:.*]] = tensor.extract %[[LOWER_EXPANSION]][%[[C0_1]]] : tensor<?xindex>
 // CHECK: %[[C0_2:.*]] = constant 0 : index
-// CHECK: %[[D1_1:.*]] = memref.dim %[[TENSOR]], %[[C0_2]] : tensor<?xf32>
+// CHECK: %[[D1_1:.*]] = tensor.dim %[[TENSOR]], %[[C0_2]] : tensor<?xf32>
 // CHECK: linalg.fill(%[[FILL_VAL]], %[[D1_OUT_MREF]]) : f32, memref<?xf32>
 // CHECK: %[[SUBVIEW:.*]] = memref.subview %[[D1_OUT_MREF]][%[[LOWER_EXTENT_D1_1]]] [%[[D1_1]]] [%[[C1]]] : memref<?xf32> to memref<?xf32, #map>
 // CHECK: linalg.copy(%0, %[[SUBVIEW]]) : memref<?xf32>, memref<?xf32, #map>
diff --git a/tools/npcomp-run-mlir/npcomp-run-mlir.cpp b/tools/npcomp-run-mlir/npcomp-run-mlir.cpp
index 42b5036c2..a5066399e 100644
--- a/tools/npcomp-run-mlir/npcomp-run-mlir.cpp
+++ b/tools/npcomp-run-mlir/npcomp-run-mlir.cpp
@@ -210,6 +210,12 @@ struct Options {
 } // namespace

 int main(int argc, char **argv) {
+  mlir::registerMLIRContextCLOptions();
+  mlir::registerAsmPrinterCLOptions();
+  mlir::registerPassManagerCLOptions();
+  Options options;
+  llvm::cl::ParseCommandLineOptions(argc, argv, "npcomp compile+run utility\n");
+
   mlir::DialectRegistry registry;
   mlir::registerAllDialects(registry);
   mlir::registerAllPasses();
@@ -222,11 +228,6 @@ int main(int argc, char **argv) {
   llvm::InitLLVM y(argc, argv);
   npcompInitializeLLVMCodegen();

-  mlir::registerAsmPrinterCLOptions();
-  mlir::registerPassManagerCLOptions();
-  Options options;
-  llvm::cl::ParseCommandLineOptions(argc, argv, "npcomp compile+run utility\n");
-
   SmallVector<StringRef, 6> sharedLibs(options.sharedLibs.begin(),
                                        options.sharedLibs.end());
   SmallVector<StringRef, 6> argValues(options.argValues.begin(),