//===-- Passes.td - Pass definition file -------------------*- tablegen -*-===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef NPCOMP_E2E_PASSES
#define NPCOMP_E2E_PASSES

include "mlir/Pass/PassBase.td"

def BypassShapes : Pass<"bypass-shapes", "FuncOp"> {
let summary = "Bypass shape calculations around ops";
let constructor = "mlir::NPCOMP::createBypassShapesPass()";
}

def LowerShapedResultsToMemref : Pass<"lower-shaped-results-to-memref", "FuncOp"> {
let summary = "Lower tcp.shaped_results regions";
let constructor = "mlir::NPCOMP::createLowerShapedResultsToMemrefPass()";
}

def LowerStdToMemref : Pass<"lower-std-to-memref", "FuncOp"> {
let summary = "Lower std ops to memref";
let constructor = "mlir::NPCOMP::createLowerStdToMemrefPass()";
}

def LowerConstantTensorsToMemref :
Pass<"lower-constant-tensors-to-memref", "ModuleOp"> {
let summary = "Lower std.constant of tensor type to memref";
let description = [{
This must be a module pass since it involves creating tcp.global ops.
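For example, a rough before/after sketch (illustrative only; the exact
spelling of the tcp ops, in particular the op that reads the global back
as a memref, is an assumption here rather than verified syntax):

  // Before: a constant of tensor type inside some function.
  %cst = constant dense<[1.0, 2.0]> : tensor<2xf32>

  // After (conceptually): a module-level tcp.global holding the data, and
  // a read of that global at the original use site. Hoisting the data to
  // module scope is why this must be a module pass.
  tcp.global @some_cst dense<[1.0, 2.0]> : tensor<2xf32>
  ...
  %m = tcp.get_global_memref @some_cst : memref<2xf32>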
}];
let constructor = "mlir::NPCOMP::createLowerConstantTensorsToMemrefPass()";
}

def LowerStructuralToMemref :
Pass<"lower-structural-to-memref", "FuncOp"> {
let summary = "Lower structural IR constructs to memref";
let description = [{
Structural constructs include:
- control flow ops (both CFG and SCF)
- function signatures
- TODO: calls

An op is "structural" if it doesn't really care about the types it operates
on; the types just have to be converted consistently.

This pass also cleans up any previous memref<->tensor materializations,
finalizing the conversion from tensor to memref.
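A small illustrative sketch (hand-written, not taken from a real test; the
intermediate materialization/cleanup steps are elided):

  // Before: the function signature and the scf.if are "structural"; they
  // don't inspect the tensor values, they just carry their types.
  func @select(%pred: i1, %a: tensor<?xf32>, %b: tensor<?xf32>) -> tensor<?xf32> {
    %0 = scf.if %pred -> (tensor<?xf32>) {
      scf.yield %a : tensor<?xf32>
    } else {
      scf.yield %b : tensor<?xf32>
    }
    return %0 : tensor<?xf32>
  }

  // After: the same structure, with tensor types rewritten to memref types.
  func @select(%pred: i1, %a: memref<?xf32>, %b: memref<?xf32>) -> memref<?xf32> {
    %0 = scf.if %pred -> (memref<?xf32>) {
      scf.yield %a : memref<?xf32>
    } else {
      scf.yield %b : memref<?xf32>
    }
    return %0 : memref<?xf32>
  }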
}];
let constructor = "mlir::NPCOMP::createLowerStructuralToMemrefPass()";
}

def LowerToNpcomprtABI : Pass<"lower-to-npcomprt-abi", "ModuleOp"> {
let summary = "Lower constructs requiring runtime support to `npcomprt`";
let description = [{
We have a specialized dialect `npcomprt` which models our runtime's data
structures. Function signatures (and presumably, eventually, other ABI
boundaries like external calls if we ever support them) will be converted
to it, as sketched below.

The constructs requiring runtime support are:
- function signatures / module metadata
- globals
- error handling
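For instance, for the module metadata item above, the lowering materializes
a module-level descriptor that enumerates the entry points so the runtime
can look them up and verify arguments. A very rough sketch (the op names and
attribute spellings of the `npcomprt` dialect are assumptions here, not
verified against its definition):

  npcomprt.module_metadata {
    npcomprt.func_metadata {funcName = @forward, numInputs = 2 : i32,
                            numOutputs = 1 : i32}
  }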
}];
let constructor = "mlir::NPCOMP::createLowerToNpcomprtABIPass()";
}

def LowerAllocMemRefOps : Pass<"lower-alloc-memref-ops", "FuncOp"> {
let summary = "Lower AllocMemRefOps";
let constructor = "mlir::NPCOMP::createLowerAllocMemRefOpsPass()";
}

def LowerToLLVM : Pass<"e2e-lower-to-llvm", "ModuleOp"> {
let summary = "Lower everything to LLVM";
let constructor = "mlir::NPCOMP::createLowerToLLVMPass()";
}

// TODO: Move this pass to upstream.
// TODO: This pass will still do "folding" on all ops.
// The applyPatternsAndFoldGreedily driver will need to be changed to restrict
// folding to the specified dialects as well.
// Perhaps a better design is having a pass that uses the conversion framework.
// The pass constructor would take a set of op names, and it would
// set up a conversion target that makes all those ops illegal, and uses
// the canonicalization patterns from those ops to legalize them.
def RestrictedCanonicalizer : Pass<"restricted-canonicalize"> {
let summary = "Canonicalize operations";
let description = [{
This pass is the same as the regular `canonicalize` pass, but it only
applies a restricted set of patterns.

This is useful when a particular canonicalization is actually needed for
correctness of a lowering flow. For such cases, running a restricted set of
canonicalizations makes it clearer which passes are needed for correctness
and which passes are "just optimizations". This helps when debugging
miscompiles and other situations where the compiler is not behaving as
expected.
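For example, to canonicalize only ops from the `tcp` dialect, one could pass
the `included-dialects` option documented below to an opt-style driver (the
tool name here is illustrative, not prescriptive):

  npcomp-opt -restricted-canonicalize=included-dialects=tcp input.mlir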
}];
let constructor = "mlir::NPCOMP::createRestrictedCanonicalizerPass()";
let options = [
ListOption<"includedDialects", "included-dialects", "std::string",
"Which dialects should be canonicalized",
"llvm::cl::MiscFlags::CommaSeparated">
];
}

#endif // NPCOMP_E2E_PASSES