//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the base file for npcomp's "reference backend".
//
// The input to this backend is a layer that we call "TCP" + a mix of scalar
// ops. TCP is currently a concrete dialect, but more generally it refers to a
// layer of the compilation stack consisting of named ops on entire tensors,
// with their preconditions checked. For example, a "matmul" op assumes that
// the contracting ("k") dimensions of both operands are equal. Earlier code
// in the compilation stack should ensure that these preconditions are met
// (such as during TCF->TCP lowering).
//
// The output of this backend is LLVM IR suitable for JITing.
//
// We expect that other backends will appear that have a similar kind of
// interface (TCP + scalar ops ---> LLVM IR / other "executable").
//
//===----------------------------------------------------------------------===//

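// Note on the TCP contract described above (schematic IR, not verbatim
// npcomp syntax): at the TCF level, an op like
//   %0 = tcf.matmul %lhs, %rhs : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
// must itself account for a mismatch in the contracting dimension, whereas
// after TCF->TCP lowering the check has been reified up front and tcp.matmul
// may assume that dim 1 of %lhs equals dim 0 of %rhs.
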
#include "npcomp/RefBackend/RefBackend.h"
#include "PassDetail.h"

#include "mlir/Conversion/SCFToStandard/SCFToStandard.h"
#include "mlir/Conversion/ShapeToStandard/ShapeToStandard.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "npcomp/Conversion/TCFToTCP/TCFToTCP.h"
#include "npcomp/Dialect/Refback/IR/RefbackOps.h"
#include "npcomp/Dialect/TCP/IR/TCPDialect.h"
#include "npcomp/Dialect/TCP/IR/TCPOps.h"

using namespace mlir;
using namespace mlir::NPCOMP;

//===----------------------------------------------------------------------===//
// Pass registration
//===----------------------------------------------------------------------===//

namespace {
#define GEN_PASS_REGISTRATION
#include "npcomp/RefBackend/Passes.h.inc"
} // end namespace

void mlir::NPCOMP::registerRefBackendPasses() {
  ::registerPasses();

  mlir::PassPipelineRegistration<RefBackendLoweringPipelineOptions>(
      "refback-lowering-pipeline", "RefBackend lowering pipeline.",
      mlir::NPCOMP::createRefBackendLoweringPipeline);
  mlir::PassPipelineRegistration<RefBackendLoweringPipelineOptions>(
      "tcf-refback-lowering-pipeline",
      "RefBackend lowering pipeline, starting from TCF.",
      mlir::NPCOMP::createTCFRefBackendLoweringPipeline);
}
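
// Illustrative command-line usage of the pipelines registered above
// (hypothetical invocation; the exact tool name and option spelling depend
// on how npcomp is built, though the `optimize` option is read below):
//   npcomp-opt -tcf-refback-lowering-pipeline='optimize=true' input.mlir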

//===----------------------------------------------------------------------===//
// LowerAllocMemRefOps
//===----------------------------------------------------------------------===//

namespace {
class LowerAllocMemRefOp : public OpRewritePattern<refback::AllocMemRefOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(refback::AllocMemRefOp op,
                                PatternRewriter &rewriter) const override {
    auto memrefType = op.getType().cast<MemRefType>();
    auto shape = op.getOperand();
    // std.alloc only accepts the dynamic extents as operands, so only
    // collect those.
    SmallVector<Value, 6> dynamicExtents;
    for (int i = 0, e = memrefType.getRank(); i < e; i++) {
      if (memrefType.isDynamicDim(i)) {
        auto extent =
            rewriter.create<shape::GetExtentOp>(op.getLoc(), shape, i);
        dynamicExtents.push_back(extent);
      }
    }
    rewriter.replaceOpWithNewOp<AllocOp>(op, memrefType, dynamicExtents);
    return success();
  }
};
} // namespace
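
// Illustration of the rewrite above (schematic IR, not verbatim syntax):
//
//   %buf = refback.alloc_memref %shape : memref<?x4xf32>
//
// becomes, roughly:
//
//   %e0 = shape.get_extent %shape, 0
//   %buf = std.alloc(%e0) : memref<?x4xf32>
//
// Only the extent for the dynamic dimension 0 is passed as an operand; the
// static dimension 4 stays encoded in the memref type.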

namespace {
class LowerAllocMemRefOps
    : public LowerAllocMemRefOpsBase<LowerAllocMemRefOps> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<shape::ShapeDialect>();
  }

  void runOnOperation() override {
    auto func = getOperation();
    auto *context = &getContext();
    OwningRewritePatternList patterns;
    patterns.insert<LowerAllocMemRefOp>(context);
    ConversionTarget target(*context);
    target.addIllegalOp<refback::AllocMemRefOp>();
    target.addLegalOp<shape::GetExtentOp>();
    target.addLegalOp<AllocOp>();
    target.addLegalOp<ConstantOp>();
    if (failed(applyPartialConversion(func, target, patterns))) {
      return signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::NPCOMP::createLowerAllocMemRefOpsPass() {
  return std::make_unique<LowerAllocMemRefOps>();
}

//===----------------------------------------------------------------------===//
// RestrictedCanonicalizer
//===----------------------------------------------------------------------===//

namespace {
struct RestrictedCanonicalizer
    : public RestrictedCanonicalizerBase<RestrictedCanonicalizer> {
  void runOnOperation() override {
    auto *context = &getContext();

    // Find the dialects from their names.
    DenseSet<StringRef> neededDialects;
    for (const std::string &dialectName : includedDialects)
      neededDialects.insert(dialectName);
    DenseSet<Dialect *> dialectsToCanonicalize;
    for (Dialect *dialect : context->getLoadedDialects()) {
      if (neededDialects.count(dialect->getNamespace())) {
        dialectsToCanonicalize.insert(dialect);
        // Erase the dialect so that we can report an error below for any
        // dialect names that are not loaded.
        neededDialects.erase(dialect->getNamespace());
      }
    }

    // Report a helpful error if a dialect is not found.
    auto missingDialects = llvm::to_vector<6>(neededDialects);
    if (!missingDialects.empty()) {
      llvm::sort(missingDialects);
      std::string buf;
      llvm::raw_string_ostream os(buf);
      llvm::interleaveComma(missingDialects, os);
      llvm::report_fatal_error("restricted-canonicalize: unknown dialects: " +
                               os.str());
    }

    // Collect all canonicalization patterns from ops in the included dialects.
    OwningRewritePatternList patterns;
    for (AbstractOperation *op : context->getRegisteredOperations())
      if (dialectsToCanonicalize.count(&op->dialect))
        op->getCanonicalizationPatterns(patterns, context);

    Operation *op = getOperation();
    applyPatternsAndFoldGreedily(op->getRegions(), patterns);
  }
};
} // end anonymous namespace

std::unique_ptr<Pass> mlir::NPCOMP::createRestrictedCanonicalizerPass() {
  return std::make_unique<RestrictedCanonicalizer>();
}
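
// Illustrative command-line usage (hypothetical invocation; the option name
// `included-dialects` matches the initializeOptions call in the pipeline
// below):
//   npcomp-opt -restricted-canonicalize='included-dialects=shape' input.mlir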

//===----------------------------------------------------------------------===//
// createRefBackendLoweringPipeline
//===----------------------------------------------------------------------===//

void mlir::NPCOMP::createRefBackendLoweringPipeline(
    OpPassManager &pm, const RefBackendLoweringPipelineOptions &options) {
  // For operations with a shape transfer function, explicitly bypass their
  // shape computations with refback.shaped_results ops.
  //
  // Right now, our lowering flow depends heavily on descriptors, so
  // technically we don't need to bypass shapes -- we can just splat out the
  // shape calculations when lowering the ops themselves. However, this design
  // keeps the door open to various future directions, and is an interesting
  // example in its own right.
  //
  // For example, if we want to lower to command-buffer style APIs like
  // Vulkan, then we need (for correctness) to bypass the shapes (actually,
  // something more sophisticated than just that) if we want to do command
  // buffer formation while we are still on tensors (e.g. to record workgroup
  // sizes). We might not care about pursuing that direction here though, so
  // consider this pass as purely advisory for now.
  //
  // One case where we might still be interested in this is dealing with
  // linalg.generic ops and other types of "fusions" that have shape transfer
  // functions that are not easily reconstructible and thus we have to capture
  // the shape transfer functions earlier in the pipeline.
  pm.addPass(createBypassShapesPass());
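
  // Schematically (illustrative IR only; the refback.shaped_results syntax
  // is not shown verbatim), the bypass turns an op like
  //   %0 = tcp.broadcast_to %arg, %shape
  // into a form where the result shape is computed outside the op:
  //   %0 = refback.shaped_results %shape { ... tcp.broadcast_to ... }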

  // Lower shape constraints before we enter tensor->memref conversion.
  // That is, we expand shape.cstr_* ops to eager error-handling code.
  pm.addPass(createConvertShapeConstraintsPass());
  // Run shape canonicalizations. In particular, this erases shape.assuming,
  // now that we have converted shape constraints.
  // TODO: This is kind of ugly. Either we use pass options or a constructor
  // that takes C++ data structures. The former makes the pass usable on the
  // command line (including reproducers); the latter makes the pass more
  // convenient.
  std::unique_ptr<Pass> shapeCanonicalizer =
      createRestrictedCanonicalizerPass();
  if (failed(shapeCanonicalizer->initializeOptions("included-dialects=shape")))
    llvm::report_fatal_error("couldn't initialize restricted-canonicalize");
  pm.addPass(std::move(shapeCanonicalizer));
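
  // Illustration of the two steps above (schematic IR): a guarded region
  // such as
  //   %w = shape.cstr_broadcastable %a, %b
  //   %r = shape.assuming %w -> ... { ... }
  // is expanded into an eager runtime check that aborts on a broadcast
  // mismatch, after which the canonicalizations inline the now-trivial
  // shape.assuming region.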

  // --------------------------------------------------------------------------
  // Lower the `tensor` type to `memref`.
  // --------------------------------------------------------------------------
  // We make a conscious effort here to do this as a sequence of separate
  // passes rather than a single mega dialect conversion pass.
  //
  // This means that intermediate steps have source/target materializations
  // (tensor_load / tensor_to_memref) in the IR.
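  //
  // Schematically (illustrative IR, not taken from an actual run):
  //   %m = tensor_to_memref %t : memref<?xf32>  // target materialization
  //   %u = tensor_load %m : memref<?xf32>       // source materialization
  // Each subsequent pass eliminates the materializations it makes redundant.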

  // Lower ops enclosed in refback.shaped_results regions.
  // For now, this is covering the "tensor compute" ops like tcp.add /
  // tcp.broadcast_to (the former being handled via a special subset of
  // linalg.generic) -- we only handle those two, so having an isolated pass
  // that hardcodes all of them is fine -- eventually we might want something
  // more pluggable. The exact interface for this pluggability depends on
  // what design we want to settle on for bypassing shape computations.
  pm.addPass(createLowerShapedResultsToMemrefPass());
  // Lower tensor-valued constants to refback.global.
  pm.addPass(createLowerConstantTensorsToMemrefPass());
  // refback::AllocMemRefOp takes a shape (i.e. extent tensor) as an argument.
  // We need to resolve this to std.alloc, which takes individual extents.
  pm.addPass(createLowerAllocMemRefOpsPass());
  // Lower shape ops to std.
  // TODO: This should in principle be moved before tensor->memref conversion.
  // But some of the tensor->memref lowerings above use shape.get_extent. For
  // example, when lowering a broadcast, we need to get an extent from its
  // shape operand to allocate the output.
  pm.addPass(createConvertShapeToStandardPass());
  // Lower std ops to memref.
  // This includes ops like extract_element.
  pm.addPass(createLowerStdToMemrefPass());
  // Lower control flow and other "structural" ops.
  //
  // These ops are generally not sensitive to the types that they operate on
  // (e.g. the types of block operands, function arguments, etc.), but they
  // all need to be converted consistently. So it makes sense to do this as
  // the final step of conversion, which also finalizes the elimination of all
  // stray source/target materializations introduced by the incremental
  // tensor->memref lowering.
  //
  // This completes the conversion to memref. There are no `tensor` values
  // after this point.
  pm.addPass(createLowerStructuralToMemrefPass());

  // TODO: Do buffer assignment. We should be able to just drop in the
  // upstream pass?

  // At this point, we have lots of loose stuff floating around from lowering,
  // so it's a good time to do some general cleanups.
  if (options.optimize) {
    pm.addPass(createCanonicalizerPass());
    pm.addPass(createCSEPass());
  }

  // --------------------------------------------------------------------------
  // Preparation for converting to an LLVM module.
  // --------------------------------------------------------------------------
  // Now, we begin the process of lowering to LLVM's level of abstraction
  // (after which LLVM will take over lowering to machine code).

  // Lower linalg ops to loops.
  // TODO: Do some linalg optimizations like tiling here.
  pm.addPass(createConvertLinalgToLoopsPass());

  // Run some cleanups.
  if (options.optimize) {
    pm.addPass(createCanonicalizerPass());
    pm.addPass(createCSEPass());
  }

  // --------------------------------------------------------------------------
  // Final conversion to an LLVM module.
  // --------------------------------------------------------------------------

  // Convert scf to std control flow in preparation for going to LLVM.
  pm.addPass(createLowerToCFGPass());

  // Convert function signatures and other constructs that interface with the
  // runtime to the `refbackrt` dialect.
  pm.addPass(createLowerToRefbackrtABIPass());

  // Finally, convert to the LLVM dialect using our custom LowerToLLVM pass,
  // which reuses the upstream patterns and gives us a place to add our own
  // patterns for our own custom ops like the refbackrt ops.
  pm.addPass(createLowerToLLVMPass());

  // Although LLVM will clean everything up eventually, for the sake of IR
  // clarity while still in MLIR, run some cleanups.
  if (options.optimize) {
    pm.addPass(createCanonicalizerPass());
    pm.addPass(createCSEPass());
  }
}

void mlir::NPCOMP::createTCFRefBackendLoweringPipeline(
    OpPassManager &pm, const RefBackendLoweringPipelineOptions &options) {
  // Convert from TCF to TCP.
  //
  // TCF has implicit broadcasting, and issues errors "inside the ops" in the
  // case of invalid broadcasts.
  //
  // TCP does not. So we need to reify the broadcasting and error checking.
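  //
  // Schematically (illustrative IR, not verbatim): an implicitly broadcasting
  //   %0 = tcf.add %lhs, %rhs
  // becomes an explicit shape computation with a reified error check,
  // followed by a tcp.add on same-shaped operands:
  //   %s = "broadcasted shape of %lhs and %rhs, with error check"
  //   %l = tcp.broadcast_to %lhs, %s
  //   %r = tcp.broadcast_to %rhs, %s
  //   %0 = tcp.add %l, %r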
  pm.addPass(createConvertTCFToTCPPass());

  createRefBackendLoweringPipeline(pm, options);
}