diff --git a/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp b/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
index f9339301f..89726a21e 100644
--- a/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
+++ b/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
@@ -120,8 +120,8 @@ LogicalResult ScanOp::verify() {
   }
   if (llvm::any_of(llvm::zip(expectedAccumulatorShape, accumulatorShape),
                    [](std::tuple<int64_t, int64_t> s) {
-                     return std::get<0>(s) != ShapedType::kDynamicSize &&
-                            std::get<1>(s) != ShapedType::kDynamicSize &&
+                     return std::get<0>(s) != ShapedType::kDynamic &&
+                            std::get<1>(s) != ShapedType::kDynamic &&
                             std::get<0>(s) != std::get<1>(s);
                    })) {
     return emitOpError("incompatible input/accumulator shapes");
@@ -134,8 +134,8 @@ LogicalResult ScanOp::verify() {
   }
   if (llvm::any_of(llvm::zip(inputShapes, outputShapes),
                    [](std::tuple<int64_t, int64_t> s) {
-                     return std::get<0>(s) != ShapedType::kDynamicSize &&
-                            std::get<1>(s) != ShapedType::kDynamicSize &&
+                     return std::get<0>(s) != ShapedType::kDynamic &&
+                            std::get<1>(s) != ShapedType::kDynamic &&
                             std::get<0>(s) != std::get<1>(s);
                    })) {
     return emitOpError("incompatible input/output shapes");
@@ -291,7 +291,7 @@ LogicalResult ScatterOp::verify() {
     return emitOpError("expected indices to be of rank 2 of i32 element type");
   }
   auto indexDepth = getIndexDepth();
-  if (indexDepth == ShapedType::kDynamicSize) {
+  if (indexDepth == ShapedType::kDynamic) {
     return emitOpError("expected index depth is static");
   }
 
diff --git a/externals/llvm-project b/externals/llvm-project
index 147fe9de2..2c1fa7345 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 147fe9de29dc13c14835127b35280c4d95c8e8ba
+Subproject commit 2c1fa734598c9470139720565fbf624a5156ec03
diff --git a/externals/mlir-hlo b/externals/mlir-hlo
index 1944b5fa6..dabc58a95 160000
--- a/externals/mlir-hlo
+++ b/externals/mlir-hlo
@@ -1 +1 @@
-Subproject commit 1944b5fa6062ec4c065d726c9c5d64f1487ee8c5
+Subproject commit dabc58a95b8bcd1140ffb0062730b81745d41724
diff --git a/include/torch-mlir/Conversion/Utils/Utils.h b/include/torch-mlir/Conversion/Utils/Utils.h
index 5a41311e2..ca967c05e 100644
--- a/include/torch-mlir/Conversion/Utils/Utils.h
+++ b/include/torch-mlir/Conversion/Utils/Utils.h
@@ -84,7 +84,7 @@ SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
 // should be converted builtin types.
 Value convertScalarToDtype(
     OpBuilder &b, Location loc, Value scalar, Type dtype,
-    llvm::Optional<Type> srcOriginalDtype = llvm::NoneType());
+    llvm::Optional<Type> srcOriginalDtype = llvm::None);
 
 // Return the number of elements of a tensor if the shape is static; otherwise,
 // return -1.
diff --git a/lib/Conversion/TorchToMhlo/Linear.cpp b/lib/Conversion/TorchToMhlo/Linear.cpp
index a62d5edbc..d124e13ea 100644
--- a/lib/Conversion/TorchToMhlo/Linear.cpp
+++ b/lib/Conversion/TorchToMhlo/Linear.cpp
@@ -88,12 +88,12 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
   auto lhsContractingDimSize = lhsShape[lhsContractingDim];
   auto rhsContractingDimSize = rhsShape[rhsContractingDim];
   if (lhsContractingDimSize != rhsContractingDimSize) {
-    if (lhsContractingDimSize == ShapedType::kDynamicSize &&
+    if (lhsContractingDimSize == ShapedType::kDynamic &&
         rhsContractingDimSize >= 0) {
       lhsShape[lhsContractingDim] = rhsContractingDimSize;
       auto newRankTy = RankedTensorType::get(lhsShape, lhsTy.getElementType());
       lhs = rewriter.create(op->getLoc(), newRankTy, lhs);
-    } else if (rhsContractingDimSize == ShapedType::kDynamicSize &&
+    } else if (rhsContractingDimSize == ShapedType::kDynamic &&
                lhsContractingDimSize >= 0) {
       rhsShape[rhsContractingDim] = lhsContractingDimSize;
       auto newRankTy = RankedTensorType::get(rhsShape, rhsTy.getElementType());
@@ -112,7 +112,7 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
         break;
       if (k == rhsResultDim || k == rhsContractingDim)
         continue;
-      if (outShape[b] == ShapedType::kDynamicSize && rhsShape[k] >= 0) {
+      if (outShape[b] == ShapedType::kDynamic && rhsShape[k] >= 0) {
         outShape[b] = rhsShape[k];
       }
       b++;
@@ -477,7 +477,7 @@ public:
     weightShapeVec[0] = ICDivGValue;
     weightShapeVec.insert(weightShapeVec.begin(), GValue);
 
-    if (weightShapeInt[0] == ShapedType::kDynamicSize) {
+    if (weightShapeInt[0] == ShapedType::kDynamic) {
       weightShapeInt.insert(weightShapeInt.begin(), groups);
     } else {
       weightShapeInt[0] /= groups;
@@ -499,7 +499,7 @@ public:
     // 3. [IC//G, G, OC, H, W, ...] => [IC//G, G*OC, H, W, ...]
     weightShapeInt.erase(weightShapeInt.begin());
-    if (weightShapeInt[1] != ShapedType::kDynamicSize) {
+    if (weightShapeInt[1] != ShapedType::kDynamic) {
       weightShapeInt[1] *= groups;
     }
     weightShapeVec.erase(weightShapeVec.begin());
 
@@ -533,7 +533,7 @@ public:
     auto finalOutShape = outType.getShape();
     std::copy(finalOutShape.begin(), finalOutShape.end(), outShape.begin());
     for (int i = 2; i < nDims; ++i) {
-      if (finalOutShape[i] == ShapedType::kDynamicSize)
+      if (finalOutShape[i] == ShapedType::kDynamic)
         continue;
       outShape[i] = finalOutShape[i] - outputPadding[i - 2];
     }
@@ -741,11 +741,11 @@ public:
     auto nSpatialDims = padding.size();
     auto nDims = inputTy.getRank();
-    
+
     // Kernel size must be constant.
     auto weightShape = weightTy.getShape();
     for (int i = 2; i < nDims; ++i) {
-      if (weightShape[i] == ShapedType::kDynamicSize) {
+      if (weightShape[i] == ShapedType::kDynamic) {
         return rewriter.notifyMatchFailure(
             op, "only constant kernel size is supported");
       }
     }
diff --git a/lib/Conversion/TorchToMhlo/Pooling.cpp b/lib/Conversion/TorchToMhlo/Pooling.cpp
index b32aeea9a..b17f3c61f 100644
--- a/lib/Conversion/TorchToMhlo/Pooling.cpp
+++ b/lib/Conversion/TorchToMhlo/Pooling.cpp
@@ -279,9 +279,9 @@ LogicalResult ConvertAtenOp::matchAndRewrite(
   SmallVector<int64_t> initIndexShapeForType(inputShape.begin(),
                                              inputShape.end() - 2);
-  if (inputShape[inputRank - 1] == ShapedType::kDynamicSize ||
-      inputShape[inputRank - 2] == ShapedType::kDynamicSize) {
-    initIndexShapeForType.push_back(ShapedType::kDynamicSize);
+  if (inputShape[inputRank - 1] == ShapedType::kDynamic ||
+      inputShape[inputRank - 2] == ShapedType::kDynamic) {
+    initIndexShapeForType.push_back(ShapedType::kDynamic);
   } else {
     initIndexShapeForType.push_back(inputShape[inputRank - 1] *
                                     inputShape[inputRank - 2]);
   }
@@ -501,7 +501,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite(
       op->getLoc(),
       RankedTensorType::get(inputTy.getShape(), outTy.getElementType()),
       windowSizeConst, inputShapeTensor, rewriter.getI64TensorAttr({}));
-  
+
   Value zero = createInitialValueForAtenPoolingOp(op, inputElemTy, rewriter);
   auto reduceWindowSize = rewriter.create(
       op->getLoc(), RankedTensorType::get(outShape, inputElemTy),
diff --git a/lib/Conversion/TorchToMhlo/ViewLike.cpp b/lib/Conversion/TorchToMhlo/ViewLike.cpp
index a28667cd1..e8102dcec 100644
--- a/lib/Conversion/TorchToMhlo/ViewLike.cpp
+++ b/lib/Conversion/TorchToMhlo/ViewLike.cpp
@@ -308,7 +308,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite(
   dims.reserve(rank);
   for (int r = 0; r < rank; ++r) {
     auto dSize = selfTy.getShape()[r];
-    if (dSize == ShapedType::kDynamicSize)
+    if (dSize == ShapedType::kDynamic)
       return rewriter.notifyMatchFailure(
           op, "the size of the dimension being squeezed can't be unknown");
     if (dSize != 1)
@@ -353,7 +353,7 @@ LogicalResult ConvertAtenOp::matchAndRewrite(
   dim = toPositiveDim(dim, rank);
 
   if (selfTy.getShape()[dim] != 1) {
-    if (selfTy.getShape()[dim] == ShapedType::kDynamicSize)
+    if (selfTy.getShape()[dim] == ShapedType::kDynamic)
       return rewriter.notifyMatchFailure(
           op,
           "the size of the dimension being squeezed is can't be unknown");
diff --git a/lib/Conversion/Utils/Utils.cpp b/lib/Conversion/Utils/Utils.cpp
index cc2b164c2..8def43845 100644
--- a/lib/Conversion/Utils/Utils.cpp
+++ b/lib/Conversion/Utils/Utils.cpp
@@ -331,21 +331,21 @@ int64_t getNumberOfElements(RankedTensorType inputType) {
 SmallVector<int64_t> makeShapeLLVMCompatible(ArrayRef<int64_t> shape) {
   SmallVector<int64_t> updatedShape(shape);
-  int64_t kDynamicSize = ShapedType::kDynamicSize;
+  int64_t kDynamic = ShapedType::kDynamic;
   for (unsigned i = 0; i < shape.size(); i++) {
     assert(shape[i] >= 0 || shape[i] == kUnknownSize);
     if (shape[i] == kUnknownSize)
-      updatedShape[i] = kDynamicSize;
+      updatedShape[i] = kDynamic;
   }
   return updatedShape;
 }
 
 SmallVector<int64_t> makeShapeTorchCompatible(ArrayRef<int64_t> shape) {
   SmallVector<int64_t> updatedShape(shape);
-  int64_t kDynamicSize = ShapedType::kDynamicSize;
+  int64_t kDynamic = ShapedType::kDynamic;
   for (unsigned i = 0; i < shape.size(); i++) {
-    assert(shape[i] >= 0 || shape[i] == kDynamicSize);
-    if (shape[i] == kDynamicSize)
+    assert(shape[i] >= 0 || shape[i] == kDynamic);
+    if (shape[i] == kDynamic)
       updatedShape[i] = kUnknownSize;
   }
   return updatedShape;
 }