torch-mlir/lib/Conversion/TorchToStd/TorchToStd.cpp

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "npcomp/Conversion/TorchToStd/TorchToStd.h"
#include "../PassDetail.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Traits.h"
#include "mlir/Transforms/DialectConversion.h"
#include "npcomp/Dialect/Torch/IR/TorchDialect.h"
#include "npcomp/Dialect/Torch/IR/TorchOps.h"
#include "npcomp/Dialect/Torch/Transforms/BackendTypeConversion.h"
using namespace mlir;
using namespace mlir::NPCOMP;
using namespace mlir::NPCOMP::Torch;
// -----------------------------------------------------------------------------
// Patterns (as this grows, it should be organized into multiple files)
// -----------------------------------------------------------------------------
// This is going to eventually be O(#torch operators), which is in the 100s.

namespace {
// Note: Confusingly, ATen's "dim" means "number of dimensions" which is what
// MLIR calls "rank".
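// The lowering computes the rank of the already-converted tensor operand with
// `RankOp` (yielding an `index`) and casts it with `IndexCastOp` to the
// converted integer result type.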
class ConvertAtenDimOp : public OpConversionPattern<AtenDimOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenDimOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto rank = rewriter.create<RankOp>(op->getLoc(), operands[0]);
    rewriter.replaceOpWithNewOp<IndexCastOp>(
        op, getTypeConverter()->convertType(op.getType()), rank);
    return success();
  }
};
} // namespace
namespace {
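// `AtenNeIntOp` compares two Torch ints, which arrive here as builtin integers
// after type conversion, so it lowers directly to `CmpIOp` with the `ne`
// predicate.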
class ConvertAtenNeIntOp : public OpConversionPattern<AtenNeIntOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenNeIntOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<CmpIOp>(op, CmpIPredicate::ne, operands[0],
                                        operands[1]);
    return success();
  }
};
} // namespace
namespace {
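// Same as above, but `AtenGtIntOp` lowers to a signed comparison (`sgt`),
// since Torch integers are signed.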
class ConvertAtenGtIntOp : public OpConversionPattern<AtenGtIntOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenGtIntOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<CmpIOp>(op, CmpIPredicate::sgt, operands[0],
                                        operands[1]);
    return success();
  }
};
} // namespace
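
// Converts a Torch tensor literal (`TensorOp`): materialize its value
// attribute as a std `ConstantOp`, wrap the resulting builtin tensor back into
// the Torch domain with `FromBuiltinTensorOp`, and copy to the exact result
// type (value vs. non-value tensor) with `copyTensorToType`.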
LogicalResult convertTensorOp(TensorOp op, PatternRewriter &rewriter) {
  auto constant = rewriter.create<ConstantOp>(op->getLoc(), op.value());
  auto vtensor = rewriter.create<FromBuiltinTensorOp>(op->getLoc(), constant);
  Value result = copyTensorToType(rewriter, op->getLoc(),
                                  op.getType().cast<BaseTensorType>(), vtensor);
  rewriter.replaceOp(op, {result});
  return success();
}

// -----------------------------------------------------------------------------
// The pass
// -----------------------------------------------------------------------------
namespace {
class ConvertTorchToStd : public ConvertTorchToStdBase<ConvertTorchToStd> {
public:
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<StandardOpsDialect>();
  }

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    ConversionTarget target(*context);
    target.addLegalDialect<Torch::TorchDialect, StandardOpsDialect>();
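
    // By default the type converter leaves every type unchanged;
    // setupBackendTypeConversion then adds the Torch <-> builtin type
    // conversions (and the materializations the conversion framework needs).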
    TypeConverter typeConverter;
    typeConverter.addConversion([](Type type) { return type; });
    setupBackendTypeConversion(target, typeConverter);

    RewritePatternSet patterns(context);
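
    // For each op we can lower, mark it illegal and register the corresponding
    // pattern. Everything else in the Torch dialect remains legal, so the
    // partial conversion below leaves unhandled ops untouched.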
    target.addIllegalOp<AtenDimOp>();
    patterns.add<ConvertAtenDimOp>(typeConverter, context);
    target.addIllegalOp<AtenNeIntOp>();
    patterns.add<ConvertAtenNeIntOp>(typeConverter, context);
    target.addIllegalOp<AtenGtIntOp>();
    patterns.add<ConvertAtenGtIntOp>(typeConverter, context);
    target.addIllegalOp<TensorOp>();
    patterns.add(convertTensorOp);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      return signalPassFailure();
  }
};
} // namespace
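
// Public factory for the pass; it runs on each FuncOp.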
std::unique_ptr<OperationPass<FuncOp>>
mlir::NPCOMP::createConvertTorchToStdPass() {
  return std::make_unique<ConvertTorchToStd>();
}