mirror of https://github.com/llvm/torch-mlir
Update LLVM Tag to 2c1fa734 (#1670)
Summary of changes:
- Change ShapedType::kDynamicSize -> ShapedType::kDynamic
- llvm::NoneType has been deprecated; change convertScalarToDtype to use llvm::None
parent 88db99946b
commit 3fc27cf6ca
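For context, a minimal sketch of the sentinel rename this commit applies throughout the tree; the hasDynamicDim helper below is hypothetical and not part of the patch, only the ShapedType::kDynamic constant comes from the bumped LLVM API:

    // Hypothetical helper, not part of this patch.
    #include "mlir/IR/BuiltinTypes.h"

    using namespace mlir;

    // Returns true if any dimension of a shaped type is dynamic.
    static bool hasDynamicDim(ShapedType ty) {
      if (!ty.hasRank())
        return true; // unranked: treat every dimension as unknown
      for (int64_t size : ty.getShape()) {
        // Pre-bump spelling: size == ShapedType::kDynamicSize
        if (size == ShapedType::kDynamic) // post-bump spelling
          return true;
      }
      return false;
    }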
@@ -120,8 +120,8 @@ LogicalResult ScanOp::verify() {
   }
   if (llvm::any_of(llvm::zip(expectedAccumulatorShape, accumulatorShape),
                    [](std::tuple<int64_t, int64_t> s) {
-                     return std::get<0>(s) != ShapedType::kDynamicSize &&
-                            std::get<1>(s) != ShapedType::kDynamicSize &&
+                     return std::get<0>(s) != ShapedType::kDynamic &&
+                            std::get<1>(s) != ShapedType::kDynamic &&
                             std::get<0>(s) != std::get<1>(s);
                    })) {
     return emitOpError("incompatible input/accumulator shapes");
@@ -134,8 +134,8 @@ LogicalResult ScanOp::verify() {
   }
   if (llvm::any_of(llvm::zip(inputShapes, outputShapes),
                    [](std::tuple<int64_t, int64_t> s) {
-                     return std::get<0>(s) != ShapedType::kDynamicSize &&
-                            std::get<1>(s) != ShapedType::kDynamicSize &&
+                     return std::get<0>(s) != ShapedType::kDynamic &&
+                            std::get<1>(s) != ShapedType::kDynamic &&
                             std::get<0>(s) != std::get<1>(s);
                    })) {
     return emitOpError("incompatible input/output shapes");
@@ -291,7 +291,7 @@ LogicalResult ScatterOp::verify() {
     return emitOpError("expected indices to be of rank 2 of i32 element type");
   }
   auto indexDepth = getIndexDepth();
-  if (indexDepth == ShapedType::kDynamicSize) {
+  if (indexDepth == ShapedType::kDynamic) {
     return emitOpError("expected index depth is static");
   }
@@ -1 +1 @@
-Subproject commit 147fe9de29dc13c14835127b35280c4d95c8e8ba
+Subproject commit 2c1fa734598c9470139720565fbf624a5156ec03
@@ -1 +1 @@
-Subproject commit 1944b5fa6062ec4c065d726c9c5d64f1487ee8c5
+Subproject commit dabc58a95b8bcd1140ffb0062730b81745d41724
@@ -84,7 +84,7 @@ SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
 // should be converted builtin types.
 Value convertScalarToDtype(
     OpBuilder &b, Location loc, Value scalar, Type dtype,
-    llvm::Optional<Type> srcOriginalDtype = llvm::NoneType());
+    llvm::Optional<Type> srcOriginalDtype = llvm::None);

 // Return the number of elements of a tensor if the shape is static; otherwise,
 // return -1.
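The hunk above is the one llvm::NoneType fix: llvm::NoneType is the deprecated type of the empty-Optional constant, so the default argument should name the constant llvm::None instead. A small sketch of the same pattern, using a hypothetical printValue function that is not from the patch:

    #include "llvm/ADT/Optional.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical function (not from the patch): the optional label
    // defaults to the empty Optional via the llvm::None constant.
    void printValue(int value,
                    llvm::Optional<llvm::StringRef> label = llvm::None) {
      if (label)
        llvm::outs() << *label << ": ";
      llvm::outs() << value << "\n";
    }

    // printValue(42);           -> "42"
    // printValue(42, "answer"); -> "answer: 42"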
@@ -88,12 +88,12 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
   auto lhsContractingDimSize = lhsShape[lhsContractingDim];
   auto rhsContractingDimSize = rhsShape[rhsContractingDim];
   if (lhsContractingDimSize != rhsContractingDimSize) {
-    if (lhsContractingDimSize == ShapedType::kDynamicSize &&
+    if (lhsContractingDimSize == ShapedType::kDynamic &&
         rhsContractingDimSize >= 0) {
       lhsShape[lhsContractingDim] = rhsContractingDimSize;
       auto newRankTy = RankedTensorType::get(lhsShape, lhsTy.getElementType());
       lhs = rewriter.create<tensor::CastOp>(op->getLoc(), newRankTy, lhs);
-    } else if (rhsContractingDimSize == ShapedType::kDynamicSize &&
+    } else if (rhsContractingDimSize == ShapedType::kDynamic &&
                lhsContractingDimSize >= 0) {
       rhsShape[rhsContractingDim] = lhsContractingDimSize;
       auto newRankTy = RankedTensorType::get(rhsShape, rhsTy.getElementType());
@@ -112,7 +112,7 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
       break;
     if (k == rhsResultDim || k == rhsContractingDim)
       continue;
-    if (outShape[b] == ShapedType::kDynamicSize && rhsShape[k] >= 0) {
+    if (outShape[b] == ShapedType::kDynamic && rhsShape[k] >= 0) {
       outShape[b] = rhsShape[k];
     }
     b++;
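Both castContractingDim hunks use the same refinement idiom: when a dimension is dynamic on one side but statically known on the other, the static size is copied over before building the casted type. A minimal sketch of that idiom (the refineDim helper is illustrative, not from the patch):

    #include "mlir/IR/BuiltinTypes.h"
    #include "llvm/ADT/SmallVector.h"

    // Illustrative helper: refine a dynamic dimension from a known size.
    // Returns true when a refinement was applied.
    static bool refineDim(llvm::SmallVectorImpl<int64_t> &shape, int64_t dim,
                          int64_t knownSize) {
      if (shape[dim] == mlir::ShapedType::kDynamic && knownSize >= 0) {
        shape[dim] = knownSize; // take the static size from the other side
        return true;
      }
      return false;
    }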
@@ -477,7 +477,7 @@ public:
     weightShapeVec[0] = ICDivGValue;
     weightShapeVec.insert(weightShapeVec.begin(), GValue);

-    if (weightShapeInt[0] == ShapedType::kDynamicSize) {
+    if (weightShapeInt[0] == ShapedType::kDynamic) {
       weightShapeInt.insert(weightShapeInt.begin(), groups);
     } else {
       weightShapeInt[0] /= groups;
@@ -499,7 +499,7 @@ public:

     // 3. [IC//G, G, OC, H, W, ...] => [IC//G, G*OC, H, W, ...]
     weightShapeInt.erase(weightShapeInt.begin());
-    if (weightShapeInt[1] != ShapedType::kDynamicSize) {
+    if (weightShapeInt[1] != ShapedType::kDynamic) {
       weightShapeInt[1] *= groups;
     }
     weightShapeVec.erase(weightShapeVec.begin());
@@ -533,7 +533,7 @@ public:
     auto finalOutShape = outType.getShape();
     std::copy(finalOutShape.begin(), finalOutShape.end(), outShape.begin());
     for (int i = 2; i < nDims; ++i) {
-      if (finalOutShape[i] == ShapedType::kDynamicSize)
+      if (finalOutShape[i] == ShapedType::kDynamic)
         continue;
       outShape[i] = finalOutShape[i] - outputPadding[i - 2];
     }
@@ -741,11 +741,11 @@ public:

     auto nSpatialDims = padding.size();
     auto nDims = inputTy.getRank();

     // Kernel size must be constant.
     auto weightShape = weightTy.getShape();
     for (int i = 2; i < nDims; ++i) {
-      if (weightShape[i] == ShapedType::kDynamicSize) {
+      if (weightShape[i] == ShapedType::kDynamic) {
         return rewriter.notifyMatchFailure(
             op, "only constant kernel size is supported");
       }
@@ -279,9 +279,9 @@ LogicalResult ConvertAtenOp<AtenMaxPool2dWithIndicesOp>::matchAndRewrite(

   SmallVector<int64_t> initIndexShapeForType(inputShape.begin(),
                                              inputShape.end() - 2);
-  if (inputShape[inputRank - 1] == ShapedType::kDynamicSize ||
-      inputShape[inputRank - 2] == ShapedType::kDynamicSize) {
-    initIndexShapeForType.push_back(ShapedType::kDynamicSize);
+  if (inputShape[inputRank - 1] == ShapedType::kDynamic ||
+      inputShape[inputRank - 2] == ShapedType::kDynamic) {
+    initIndexShapeForType.push_back(ShapedType::kDynamic);
   } else {
     initIndexShapeForType.push_back(inputShape[inputRank - 1] *
                                     inputShape[inputRank - 2]);
@@ -501,7 +501,7 @@ LogicalResult ConvertAtenOp<AtenAvgPool2dOp>::matchAndRewrite(
       op->getLoc(),
       RankedTensorType::get(inputTy.getShape(), outTy.getElementType()),
       windowSizeConst, inputShapeTensor, rewriter.getI64TensorAttr({}));

   Value zero = createInitialValueForAtenPoolingOp(op, inputElemTy, rewriter);
   auto reduceWindowSize = rewriter.create<mhlo::ReduceWindowOp>(
       op->getLoc(), RankedTensorType::get(outShape, inputElemTy),
@@ -308,7 +308,7 @@ LogicalResult ConvertAtenOp<AtenSqueezeOp>::matchAndRewrite(
   dims.reserve(rank);
   for (int r = 0; r < rank; ++r) {
     auto dSize = selfTy.getShape()[r];
-    if (dSize == ShapedType::kDynamicSize)
+    if (dSize == ShapedType::kDynamic)
       return rewriter.notifyMatchFailure(
           op, "the size of the dimension being squeezed can't be unknown");
     if (dSize != 1)
@@ -353,7 +353,7 @@ LogicalResult ConvertAtenOp<AtenSqueezeDimOp>::matchAndRewrite(

   dim = toPositiveDim(dim, rank);
   if (selfTy.getShape()[dim] != 1) {
-    if (selfTy.getShape()[dim] == ShapedType::kDynamicSize)
+    if (selfTy.getShape()[dim] == ShapedType::kDynamic)
       return rewriter.notifyMatchFailure(
           op, "the size of the dimension being squeezed is can't be unknown");
@@ -331,21 +331,21 @@ int64_t getNumberOfElements(RankedTensorType inputType) {

 SmallVector<int64_t> makeShapeLLVMCompatible(ArrayRef<int64_t> shape) {
   SmallVector<int64_t> updatedShape(shape);
-  int64_t kDynamicSize = ShapedType::kDynamicSize;
+  int64_t kDynamic = ShapedType::kDynamic;
   for (unsigned i = 0; i < shape.size(); i++) {
     assert(shape[i] >= 0 || shape[i] == kUnknownSize);
     if (shape[i] == kUnknownSize)
-      updatedShape[i] = kDynamicSize;
+      updatedShape[i] = kDynamic;
   }
   return updatedShape;
 }

 SmallVector<int64_t> makeShapeTorchCompatible(ArrayRef<int64_t> shape) {
   SmallVector<int64_t> updatedShape(shape);
-  int64_t kDynamicSize = ShapedType::kDynamicSize;
+  int64_t kDynamic = ShapedType::kDynamic;
   for (unsigned i = 0; i < shape.size(); i++) {
-    assert(shape[i] >= 0 || shape[i] == kDynamicSize);
-    if (shape[i] == kDynamicSize)
+    assert(shape[i] >= 0 || shape[i] == kDynamic);
+    if (shape[i] == kDynamic)
       updatedShape[i] = kUnknownSize;
   }
   return updatedShape;
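These helpers translate between torch-mlir's kUnknownSize sentinel (-1) and MLIR's ShapedType::kDynamic, which after this bump is no longer guaranteed to equal -1, so the mapping is a real conversion rather than a no-op. A hedged round-trip sketch (kUnknownSize is redeclared here only to keep the snippet self-contained):

    #include "mlir/IR/BuiltinTypes.h"
    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    constexpr int64_t kUnknownSize = -1; // torch-mlir's sentinel

    void roundTrip() {
      llvm::SmallVector<int64_t> torchShape = {2, kUnknownSize, 4};

      // Torch -> MLIR: swap kUnknownSize for ShapedType::kDynamic.
      llvm::SmallVector<int64_t> mlirShape(torchShape.begin(), torchShape.end());
      for (int64_t &s : mlirShape)
        if (s == kUnknownSize)
          s = mlir::ShapedType::kDynamic;

      // MLIR -> Torch: swap ShapedType::kDynamic back to kUnknownSize.
      llvm::SmallVector<int64_t> backToTorch(mlirShape.begin(), mlirShape.end());
      for (int64_t &s : backToTorch)
        if (s == mlir::ShapedType::kDynamic)
          s = kUnknownSize;

      assert(backToTorch == torchShape && "round trip preserves the shape");
    }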