//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//
//
// The torch-mlir "reference backend" requires a few passes to glue things
// together so that the final IR will work with ExecutionEngine.
//
// There is no actual "backend".
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/MLProgram/IR/MLProgram.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/Math/Transforms/Approximation.h"
#include "mlir/Dialect/Math/Transforms/Passes.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "torch-mlir/Dialect/TorchConversion/IR/TorchConversionOps.h"
#include "torch-mlir/Dialect/TorchConversion/Transforms/BackendTypeConversion.h"
#include "torch-mlir/RefBackend/Passes.h"
#include <map>
#include <numeric>
#include <set>

using namespace mlir;
using namespace mlir::torch;
using namespace mlir::torch::RefBackend;

//===----------------------------------------------------------------------===//
// Pass registration
//===----------------------------------------------------------------------===//

namespace {
#define GEN_PASS_REGISTRATION
#include "torch-mlir/RefBackend/Passes.h.inc"
} // end namespace

void mlir::torch::RefBackend::registerRefBackendPasses() { ::registerPasses(); }

//===----------------------------------------------------------------------===//
// MungeCallingConventions
//===----------------------------------------------------------------------===//

static bool isArgMemRefTypeValid(Type type) {
  if (auto memRefType = dyn_cast<MemRefType>(type)) {
    Type elemTy = memRefType.getElementType();
    if (isa<Float16Type, Float32Type, Float64Type>(elemTy)) {
      return true;
    } else if (auto integerTy = dyn_cast<IntegerType>(elemTy)) {
      if (integerTy.isSignlessInteger(64))
        return true;
      if (integerTy.isSignlessInteger(32))
        return true;
      if (integerTy.isSignlessInteger(8))
        return true;
      if (integerTy.isSignedInteger(8))
        return true;
      if (integerTy.isSignlessInteger(1))
        return true;
    } else if (auto complexTy = dyn_cast<ComplexType>(elemTy)) {
      return isa<Float32Type, Float64Type>(complexTy.getElementType());
    }
  }
  return false;
}

static void addEmitCInterfaceAttr(func::FuncOp func) {
  func->setAttr("llvm.emit_c_interface", UnitAttr::get(func.getContext()));
}

static Type getAbiTypeForMemRef(Type type) {
  return UnrankedMemRefType::get(cast<MemRefType>(type).getElementType(), 0);
}

// Helper function to get the type string for one return value like i32, f64,
// mri32 etc. The strings from multiple return values are concatenated to get
// the consumeFuncReturnFunc name.
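// For example, a function returning an f32 memref (passed back as an unranked
// memref) and an i64 yields the name
// "refbackend_consume_func_return_mrf32_i64".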
static std::string getTypeToken(Type type) {
  if (type.isSignlessInteger())
    return ("i" + Twine(type.getIntOrFloatBitWidth())).str();
  else if (isa<mlir::FloatType>(type))
    return ("f" + Twine(type.getIntOrFloatBitWidth())).str();
  else if (auto complexTy = dyn_cast<mlir::ComplexType>(type))
    return ("c" + Twine(complexTy.getElementType().getIntOrFloatBitWidth()))
        .str();
  else if (auto memRefType = dyn_cast<UnrankedMemRefType>(type))
    return "mr" + getTypeToken(memRefType.getElementType());

  llvm_unreachable(
      "Type token should handle all types: memref, float, int and complex");
}

// Systematically derive the consumeFuncReturnFunc name from return value
// types.
static std::string getConsumeReturnFunctionNameForReturnTypes(TypeRange types) {
  SmallVector<std::string> tokens = {"refbackend_consume_func_return"};
  for (auto type : types)
    tokens.push_back(getTypeToken(type));

  return std::accumulate(tokens.begin(), tokens.end(), std::string(),
                         [](std::string &a, std::string &b) {
                           return a.empty() ? b : (a + "_" + b);
                         });
}

// Replace the original returnOp with a call to consumeFuncReturnFunc and add
// the op to the `toErase` vector.
static void replaceReturnWithCall(OpBuilder b, func::ReturnOp op,
                                  StringRef funcName, TypeRange retTypes,
                                  SmallVectorImpl<Value> &vals,
                                  SmallVectorImpl<Operation *> &toErase) {
  b.create<mlir::func::CallOp>(op.getLoc(), funcName, TypeRange({}), vals);
  b.create<mlir::func::ReturnOp>(op.getLoc());
  toErase.push_back(op);
}

static LogicalResult mungeFunction(
    func::FuncOp func,
    std::map<std::string, std::vector<Type>> &invokedConsumeFuncReturnFuncs) {
  // Only need to call mungeFunction for functions callable from outside of the
  // module.
  if (func.isPrivate())
    return success();
  // Add `llvm.emit_c_interface`.
  // This allows ExecutionEngine to resolve the symbol properly.
  addEmitCInterfaceAttr(func);

  // Rewrite the function as follows:
  // - replace all memref arguments with unranked memref
  // - replace all returns with a call to a function, which is going to be
  //   supplied by the code setting up the ExecutionEngine to process the
  //   result. Additionally, ensure that all results are passed as unranked
  //   memrefs.
  // - replace the function signature accordingly (unranked inputs, no returns).
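  //
  // For example (illustrative sketch), a function such as:
  //   func.func @forward(%arg0: memref<2x3xf32>) -> memref<2x3xf32>
  // is rewritten to take an unranked memref and return nothing:
  //   func.func @forward(%arg0: memref<*xf32>) {
  //     ...
  //     call @refbackend_consume_func_return_mrf32(%ret)
  //         : (memref<*xf32>) -> ()
  //     return
  //   }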
  OpBuilder b(func.getBody());

  SmallVector<Type> newArgTypes;
  for (auto arg : func.getArguments()) {
    auto type = arg.getType();
    if (!isArgMemRefTypeValid(type)) {
      return emitError(arg.getLoc())
          .append("argument must be a memref of f16, f32, f64, i32, i64, i8, "
                  "i1, c32, c64, but got ",
                  type);
    }
    auto cast = b.create<memref::CastOp>(arg.getLoc(), type, arg);
    arg.replaceAllUsesExcept(cast, cast);
    arg.setType(getAbiTypeForMemRef(type));
    newArgTypes.push_back(arg.getType());
  }

  SmallVector<Operation *> toErase;
  func.walk([&](func::ReturnOp op) {
    auto types = op.getOperandTypes();
    b.setInsertionPoint(op);
    // Memref types.
    std::vector<Type> retTypes;
    SmallVector<Value> retVals;
    for (auto en : llvm::enumerate(types)) {
      Type retType = en.value();
      Value retVal = op.getOperand(en.index());
      if (auto memrefReturnType = dyn_cast<MemRefType>(retType)) {
        auto elemType = memrefReturnType.getElementType();
        retType = UnrankedMemRefType::get(elemType, 0);
        // Cast to unranked memref type before sending it as a function
        // argument.
        retVal = b.create<memref::CastOp>(
            op.getLoc(), getAbiTypeForMemRef(types[en.index()]), retVal);
      }
      retTypes.push_back(retType);
      retVals.push_back(retVal);
    }

    std::string funcName = getConsumeReturnFunctionNameForReturnTypes(retTypes);

    auto invokedFuncsEnd = invokedConsumeFuncReturnFuncs.end();
    if (invokedConsumeFuncReturnFuncs.find(funcName) == invokedFuncsEnd)
      invokedConsumeFuncReturnFuncs.insert({funcName, retTypes});
    replaceReturnWithCall(b, op, funcName, retTypes, retVals, toErase);
  });
  func.setType(FunctionType::get(func.getContext(), newArgTypes, {}));
  for (Operation *op : toErase)
    op->erase();
  return success();
}

namespace {
class MungeCallingConventions
    : public MungeCallingConventionsBase<MungeCallingConventions> {
  void runOnOperation() override {
    auto module = getOperation();
    OpBuilder b(module.getBodyRegion());
    std::map<std::string, std::vector<Type>> invokedConsumeFuncReturnFuncs;
    for (auto func : module.getOps<func::FuncOp>()) {
      if (failed(mungeFunction(func, invokedConsumeFuncReturnFuncs)))
        return signalPassFailure();
    }

    // Create FuncOp for consumeFuncReturnFuncs that are used.
    for (auto &p : invokedConsumeFuncReturnFuncs) {
      auto consumeFuncReturnFunc = b.create<func::FuncOp>(
          module.getLoc(), p.first,
          FunctionType::get(module.getContext(), p.second, {}));
      consumeFuncReturnFunc.setPrivate();
      addEmitCInterfaceAttr(consumeFuncReturnFunc);
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<ModuleOp>>
mlir::torch::RefBackend::createMungeCallingConventionsPass() {
  return std::make_unique<MungeCallingConventions>();
}

//===----------------------------------------------------------------------===//
// MLProgramBufferize
//===----------------------------------------------------------------------===//
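
// For example (illustrative sketch), a statically shaped global such as:
//   ml_program.global private mutable @global_seed(dense<0> : tensor<i64>)
//       : tensor<i64>
// is bufferized to:
//   memref.global "private" @global_seed : memref<i64> = dense<0>
// with loads rewritten to memref.get_global + bufferization.to_tensor, and
// stores to bufferization.to_memref + memref.copy into the global buffer.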

static LogicalResult bufferizeMLProgramGlobalOp(ml_program::GlobalOp globalOp,
                                                OpBuilder &b) {
  if (!globalOp.getValue().has_value())
    return globalOp.emitError("global op must have a value");

  RankedTensorType tensorType = cast<RankedTensorType>(globalOp.getType());
  MemRefType memrefType =
      MemRefType::get(tensorType.getShape(), tensorType.getElementType());

  b.setInsertionPointToStart(globalOp->getParentOfType<ModuleOp>().getBody());
  b.create<memref::GlobalOp>(
      UnknownLoc::get(b.getContext()), globalOp.getSymName(),
      /*sym_visibility=*/globalOp.getSymVisibilityAttr(),
      /*type=*/memrefType,
      /*initial_value=*/globalOp.getValue().value(),
      /*constant=*/!globalOp.getIsMutable(),
      /*alignment=*/nullptr);
  return success();
}

static LogicalResult
bufferizeMLProgramGlobalLoadOp(ml_program::GlobalLoadOp globalLoadOp,
                               OpBuilder &b,
                               SmallVector<Operation *> &toErase) {
  RankedTensorType tensorType = cast<RankedTensorType>(globalLoadOp.getType());
  MemRefType memrefType =
      MemRefType::get(tensorType.getShape(), tensorType.getElementType());

  b.setInsertionPoint(globalLoadOp);
  Value globalVal = b.create<memref::GetGlobalOp>(
      globalLoadOp.getLoc(), memrefType,
      globalLoadOp.getGlobalAttr().getLeafReference());
  globalVal = b.create<bufferization::ToTensorOp>(globalLoadOp->getLoc(),
                                                  tensorType, globalVal);
  globalLoadOp->getResult(0).replaceAllUsesWith(globalVal);
  return success();
}

static LogicalResult
bufferizeMLProgramGlobalStoreOp(ml_program::GlobalStoreOp globalStoreOp,
                                OpBuilder &b,
                                SmallVector<Operation *> &toErase) {
  RankedTensorType tensorType =
      cast<RankedTensorType>(globalStoreOp.getValue().getType());
  MemRefType memrefType =
      MemRefType::get(tensorType.getShape(), tensorType.getElementType());

  b.setInsertionPoint(globalStoreOp);
  Value memref = b.create<memref::GetGlobalOp>(
      globalStoreOp.getLoc(), memrefType,
      globalStoreOp.getGlobalAttr().getLeafReference());
  Value copyValue = b.create<bufferization::ToMemrefOp>(
      globalStoreOp->getLoc(), memrefType, globalStoreOp.getValue());
  b.create<memref::CopyOp>(globalStoreOp->getLoc(), copyValue, memref);
  return success();
}

namespace {
/// Converts MLProgram operations that work on tensor-type operands or results
/// to work on buffers.
class MLProgramBufferize : public MLProgramBufferizeBase<MLProgramBufferize> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry
        .insert<bufferization::BufferizationDialect, memref::MemRefDialect>();
  }

  void runOnOperation() override {
    auto module = getOperation();
    OpBuilder b(module.getBodyRegion());
    SmallVector<Operation *> toErase;

    auto walkResult = module.walk([&](ml_program::GlobalOp op) {
      if (auto type = dyn_cast<RankedTensorType>(op.getType())) {
        if (!type.hasStaticShape()) {
          // The ml_program.global has a dynamically shaped tensor type.
          op.emitError(
              "unimplemented: global op bufferization with dynamic shape");
          return WalkResult::interrupt();
        }
      } else {
        // The ml_program.global has a non-tensor type.
        op.emitError("unsupported global op type");
        return WalkResult::interrupt();
      }

      if (failed(bufferizeMLProgramGlobalOp(op, b))) {
        op.emitError("bufferization for this op failed");
        return WalkResult::interrupt();
      }
      toErase.push_back(op);
      return WalkResult::advance();
    });

    if (walkResult.wasInterrupted())
      return signalPassFailure();

    module.walk([&](ml_program::GlobalLoadOp op) {
      if (failed(bufferizeMLProgramGlobalLoadOp(op, b, toErase))) {
        op.emitError("bufferization for this op failed");
        return;
      }
      toErase.push_back(op);
    });

    module.walk([&](ml_program::GlobalStoreOp op) {
      if (failed(bufferizeMLProgramGlobalStoreOp(op, b, toErase))) {
        op.emitError("bufferization for this op failed");
        return;
      }
      toErase.push_back(op);
    });

    for (auto op : llvm::reverse(toErase))
      op->erase();
  }
};
} // namespace

std::unique_ptr<OperationPass<ModuleOp>>
mlir::torch::RefBackend::createMLProgramBufferizePass() {
  return std::make_unique<MLProgramBufferize>();
}

//===----------------------------------------------------------------------===//
// ExpandOpsForLLVM
//===----------------------------------------------------------------------===//
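
// Expand ops that lack a direct LLVM lowering into simpler ops: math.tanh is
// rewritten in terms of math.exp (populateExpandTanhPattern), and math.erf is
// replaced by a polynomial approximation, so the remaining conversion to LLVM
// only sees ops it can handle.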

namespace {
class ExpandOpsForLLVM : public ExpandOpsForLLVMBase<ExpandOpsForLLVM> {
  void runOnOperation() override {
    auto func = getOperation();
    auto *context = &getContext();
    RewritePatternSet patterns(context);
    populateExpandTanhPattern(patterns);
    patterns.add<math::ErfPolynomialApproximation>(patterns.getContext());
    ConversionTarget target(*context);
    target.addLegalDialect<func::FuncDialect>();
    target.addLegalDialect<math::MathDialect>();
    target.addLegalDialect<arith::ArithDialect>();
    target.addIllegalOp<math::TanhOp>();
    target.addIllegalOp<math::ErfOp>();
    if (failed(applyPartialConversion(func, target, std::move(patterns)))) {
      return signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::torch::RefBackend::createExpandOpsForLLVMPass() {
  return std::make_unique<ExpandOpsForLLVM>();
}

//===----------------------------------------------------------------------===//
// MungeMemrefCopy
//===----------------------------------------------------------------------===//
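
// For example (illustrative sketch), a rank-1 copy such as:
//   memref.copy %src, %dst : memref<4xf32> to memref<4xf32>
// becomes an element-wise linalg.generic with identity indexing maps:
//   linalg.generic {indexing_maps = [#id, #id],
//                   iterator_types = ["parallel"]}
//       ins(%src : memref<4xf32>) outs(%dst : memref<4xf32>) {
//   ^bb0(%in: f32, %out: f32):
//     linalg.yield %in : f32
//   }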

Operation *createLinalgCopyOp(OpBuilder &b, Location loc, Value from,
                              Value to) {
  auto memrefTypeFrom = cast<MemRefType>(from.getType());
  auto memrefTypeTo = cast<MemRefType>(to.getType());
  (void)memrefTypeFrom;
  assert(memrefTypeFrom && memrefTypeTo &&
         memrefTypeFrom.getRank() == memrefTypeTo.getRank());
  AffineMap id =
      AffineMap::getMultiDimIdentityMap(memrefTypeTo.getRank(), b.getContext());
  SmallVector<utils::IteratorType> iteratorTypes(memrefTypeTo.getRank(),
                                                 utils::IteratorType::parallel);
  return b.create<linalg::GenericOp>(
      loc,
      /*inputs=*/from,
      /*outputs=*/to,
      /*indexingMaps=*/llvm::ArrayRef({id, id}),
      /*iteratorTypes=*/iteratorTypes,
      [](OpBuilder &b, Location loc, ValueRange args) {
        b.create<linalg::YieldOp>(loc, args.front());
      });
}

namespace {
class MemrefCopyOpToLinalg : public OpRewritePattern<memref::CopyOp> {
  using OpRewritePattern<memref::CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::CopyOp copyOp,
                                PatternRewriter &rewriter) const override {
    Operation *linalgCopy = createLinalgCopyOp(
        rewriter, copyOp.getLoc(), copyOp.getSource(), copyOp.getTarget());
    rewriter.replaceOp(copyOp, linalgCopy->getResults());
    return success();
  }
};

class MungeMemrefCopy : public MungeMemrefCopyBase<MungeMemrefCopy> {
  void runOnOperation() override {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(&getContext());
    patterns.insert<MemrefCopyOpToLinalg>(context);
    if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                            std::move(patterns)))) {
      return signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::torch::RefBackend::createMungeMemrefCopyPass() {
  return std::make_unique<MungeMemrefCopy>();
}
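
//===----------------------------------------------------------------------===//
// GeneralizeTensorConcat
//===----------------------------------------------------------------------===//

// Decompose tensor.concat into simpler tensor ops; the upstream decomposition
// patterns rewrite it, roughly, as tensor.insert_slice ops into a destination
// tensor of the concatenated shape.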

namespace {
class GeneralizeTensorConcat
    : public GeneralizeTensorConcatBase<GeneralizeTensorConcat> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<tensor::TensorDialect>();
  }

  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());
    tensor::populateDecomposeTensorConcatPatterns(patterns);
    if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                            std::move(patterns)))) {
      return signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::torch::RefBackend::createGeneralizeTensorConcatPass() {
  return std::make_unique<GeneralizeTensorConcat>();
}
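
//===----------------------------------------------------------------------===//
// GeneralizeTensorPad
//===----------------------------------------------------------------------===//

// Rewrite tensor.pad, via linalg::GeneralizePadOpPattern, into a filled
// destination tensor plus a tensor.insert_slice of the source (roughly:
// tensor.empty + linalg.fill + tensor.insert_slice).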

namespace {
class GeneralizeTensorPad
    : public GeneralizeTensorPadBase<GeneralizeTensorPad> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<linalg::LinalgDialect>();
  }

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(&getContext());
    patterns.insert<linalg::GeneralizePadOpPattern>(context);
    if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                            std::move(patterns)))) {
      return signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::torch::RefBackend::createGeneralizeTensorPadPass() {
  return std::make_unique<GeneralizeTensorPad>();
}