//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//
|
#include "PassDetail.h"
|
|
|
|
|
2022-12-14 00:25:41 +08:00
|
|
|
#include "SimplifyAbstractInterpCalculationsUtils.h"
|
2022-03-10 08:44:22 +08:00
|
|
|
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
|
|
|
|
#include "torch-mlir/Dialect/Torch/Transforms/Passes.h"
|
|
|
|
#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
|
|
|
|
|
|
|
|
using namespace mlir;
|
|
|
|
using namespace mlir::torch;
|
|
|
|
using namespace mlir::torch::Torch;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
class DecomposeAtenSizeOp : public OpRewritePattern<AtenSizeOp> {
|
|
|
|
public:
|
|
|
|
using OpRewritePattern::OpRewritePattern;
|
|
|
|
LogicalResult matchAndRewrite(AtenSizeOp op,
|
|
|
|
PatternRewriter &rewriter) const override {
|
|
|
|
Location loc = op.getLoc();
|
2022-12-08 04:20:41 +08:00
|
|
|
Value self = op.getSelf();
|
2022-03-10 08:44:22 +08:00
|
|
|
MLIRContext *context = op.getContext();
|
|
|
|
auto tensorType = self.getType().cast<BaseTensorType>();
|
|
|
|
if (!tensorType.hasSizes())
|
|
|
|
return rewriter.notifyMatchFailure(op, "unranked tensor");
|
|
|
|
int64_t rank = tensorType.getSizes().size();
|
|
|
|
SmallVector<Value> sizes;
|
|
|
|
for (int i = 0; i < rank; i++) {
|
|
|
|
Value dim = rewriter.create<Torch::ConstantIntOp>(
|
|
|
|
loc, rewriter.getI64IntegerAttr(i));
|
|
|
|
sizes.push_back(rewriter.create<AtenSizeIntOp>(loc, self, dim));
|
|
|
|
}
|
|
|
|
|
|
|
|
Value sizeList = rewriter.create<PrimListConstructOp>(
|
|
|
|
loc, Torch::ListType::get(Torch::IntType::get(context)), sizes);
|
|
|
|
rewriter.replaceOp(op, sizeList);
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
2022-12-14 00:25:41 +08:00
|
|
|
/// Refine the type of result `resultNum` of `op` using the shape yielded by
/// the op's shape calculation region.
///
/// The yielded shape must be a `prim.ListConstruct` of ints; each element
/// that is a known constant (and not potentially clobbered by a list
/// mutation) becomes a static dimension size, and every other element becomes
/// `kUnknownSize`. Fails to match when the list may be mutated in a way this
/// analysis cannot model.
static LogicalResult refineShapeCalculateResult(ShapeCalculateOp op,
                                                int resultNum,
                                                PatternRewriter &rewriter) {
  auto yieldShapes = op.getCalculation().front().getTerminator();
  auto shape = yieldShapes->getOperand(resultNum);
  auto result = op->getResult(resultNum);

  // If the yielded shape is not a list literal, we can't analyze it.
  // AbstractlyInterpretListOpsWithinABlock should already have converted as
  // much as possible to literals.
  auto listConstruct = shape.getDefiningOp<PrimListConstructOp>();
  if (!listConstruct)
    return rewriter.notifyMatchFailure(
        op, "Expected result from ShapeCalculateOp calculation to be a "
            "`PrimListConstructOp`");

  // Tracks which list elements may have been overwritten after construction;
  // clobbered elements must be treated as unknown sizes.
  llvm::BitVector clobberedElements(listConstruct->getNumOperands());
  // Analyze the users to determine if we can refine the shape.
  for (Operation *user : listConstruct->getUsers()) {
    // If an op doesn't mutate the list, then we can handle it.
    if (!potentiallyMutatesListOperands(user))
      continue;
    // We can handle Aten_SetItemTOp specially, since we know that it doesn't
    // change the size of the list. It might clobber some elements, which then
    // become dimensions with unknown size.
    if (auto setItem = dyn_cast<Aten_SetItemTOp>(user)) {
      // If the index is statically known, we can clobber only a single index.
      // Otherwise, we conservatively clobber all of them.
      std::optional<int64_t> indexOpt = matchLegalConstantIndexIntoListOfSize(
          setItem.getIdx(), listConstruct->getNumOperands());
      if (indexOpt)
        clobberedElements.set(*indexOpt);
      else
        clobberedElements.set();
      continue;
    }
    // An unhandled op! We can't make any assumptions about the shape.
    return rewriter.notifyMatchFailure(op, "Unhandled op that mutates lists");
  }

  // Construct the list of sizes implied by the yielded shape.
  SmallVector<int64_t> sizes;
  for (auto operand : llvm::enumerate(listConstruct->getOperands())) {
    int64_t size;
    if (matchPattern(operand.value(), m_TorchConstantInt(&size)) &&
        !clobberedElements[operand.index()])
      sizes.push_back(size);
    else
      sizes.push_back(kUnknownSize);
  }

  // Build the refined type: the computed sizes with the original dtype.
  // `originalResultType` is already a `BaseTensorType`, so no additional cast
  // is needed before calling `getWithSizesAndDtype`.
  auto originalResultType = result.getType().cast<BaseTensorType>();
  auto impliedTypesFromShape =
      originalResultType
          .getWithSizesAndDtype(ArrayRef(sizes),
                                originalResultType.getOptionalDtype())
          .cast<BaseTensorType>();

  return updateCalculateOpResultTypes(op, resultNum, impliedTypesFromShape,
                                      rewriter);
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
// This pattern propagates information out of the shape calculation region and
|
|
|
|
// into the ShapeCalculateOp result types.
|
|
|
|
class RefineShapeCalculateOp : public OpRewritePattern<ShapeCalculateOp> {
|
|
|
|
public:
|
|
|
|
using OpRewritePattern::OpRewritePattern;
|
|
|
|
LogicalResult matchAndRewrite(ShapeCalculateOp op,
|
|
|
|
PatternRewriter &rewriter) const override {
|
2022-12-14 00:25:41 +08:00
|
|
|
LogicalResult result = failure();
|
2022-03-10 08:44:22 +08:00
|
|
|
for (int i = 0, e = op->getNumResults(); i != e; i++)
|
2022-12-14 00:25:41 +08:00
|
|
|
if (succeeded(refineShapeCalculateResult(op, i, rewriter)))
|
|
|
|
result = success();
|
|
|
|
return result;
|
2022-03-10 08:44:22 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
class SimplifyShapeCalculationsPass
|
|
|
|
: public SimplifyShapeCalculationsBase<SimplifyShapeCalculationsPass> {
|
|
|
|
void runOnOperation() override {
|
|
|
|
MLIRContext *context = &getContext();
|
|
|
|
|
|
|
|
RewritePatternSet patterns(context);
|
2023-05-13 04:40:45 +08:00
|
|
|
populateFullyUnrollPrimLoopOpPattern(patterns, context);
|
|
|
|
populateAbstractlyInterpretListOpsWithinABlockPattern(patterns, context);
|
|
|
|
populateFoldPrimUncheckedCastOpPattern(patterns, context);
|
2022-03-10 08:44:22 +08:00
|
|
|
patterns.insert<DecomposeAtenSizeOp>(context);
|
|
|
|
patterns.insert<RefineShapeCalculateOp>(context);
|
|
|
|
|
|
|
|
PrimIfOp::getCanonicalizationPatterns(patterns, context);
|
|
|
|
Aten__Getitem__TOp::getCanonicalizationPatterns(patterns, context);
|
|
|
|
AtenSizeOp::getCanonicalizationPatterns(patterns, context);
|
|
|
|
AtenLenTOp::getCanonicalizationPatterns(patterns, context);
|
2022-07-29 07:00:02 +08:00
|
|
|
AtenAddTOp::getCanonicalizationPatterns(patterns, context);
|
2022-03-10 08:44:22 +08:00
|
|
|
|
|
|
|
// TODO: Debug visitation order to make this more efficient.
|
|
|
|
// A single linear scan should suffice.
|
|
|
|
GreedyRewriteConfig config;
|
|
|
|
config.useTopDownTraversal = true;
|
2023-01-11 07:07:19 +08:00
|
|
|
config.maxIterations = GreedyRewriteConfig::kNoLimit;
|
2022-03-10 08:44:22 +08:00
|
|
|
if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns),
|
|
|
|
config))) {
|
|
|
|
return signalPassFailure();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
2022-04-27 03:27:51 +08:00
|
|
|
std::unique_ptr<OperationPass<func::FuncOp>>
|
2022-03-10 08:44:22 +08:00
|
|
|
mlir::torch::Torch::createSimplifyShapeCalculationsPass() {
|
|
|
|
return std::make_unique<SimplifyShapeCalculationsPass>();
|
|
|
|
}
|