2020-05-21 09:48:53 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "PassDetail.h"
|
2020-10-07 07:14:37 +08:00
|
|
|
#include "npcomp/RefBackend/RefBackend.h"
|
2020-05-21 09:48:53 +08:00
|
|
|
|
|
|
|
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
|
|
|
|
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
|
|
|
|
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
|
2020-09-25 08:14:21 +08:00
|
|
|
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
|
2020-05-21 09:48:53 +08:00
|
|
|
#include "mlir/Transforms/DialectConversion.h"
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
|
2020-10-08 08:12:52 +08:00
|
|
|
#include "npcomp/Dialect/Refbackrt/IR/RefbackrtDialect.h"
|
|
|
|
#include "npcomp/Dialect/Refbackrt/IR/RefbackrtOps.h"
|
2020-05-21 09:48:53 +08:00
|
|
|
|
|
|
|
using namespace mlir;
|
|
|
|
using namespace mlir::NPCOMP;
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
using mlir::LLVM::LLVMFuncOp;
|
2020-05-21 09:48:53 +08:00
|
|
|
using mlir::LLVM::LLVMType;
|
|
|
|
|
2020-07-11 08:31:24 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-07-11 08:50:55 +08:00
|
|
|
// Descriptor types shared with the runtime.
|
|
|
|
//
|
|
|
|
// These correspond to the types in CompilerDataStructures.h
|
2020-07-11 08:31:24 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-10-08 08:12:52 +08:00
|
|
|
// Get the LLVMType for refbackrt::FuncDescriptor.
|
2020-08-28 06:09:10 +08:00
|
|
|
static LLVMType getFuncDescriptorTy(MLIRContext *context) {
|
|
|
|
return LLVMType::getStructTy(context, {
|
|
|
|
// Name length.
|
|
|
|
LLVMType::getIntNTy(context, 32),
|
|
|
|
// Name chars.
|
|
|
|
LLVMType::getInt8PtrTy(context),
|
|
|
|
// Type-erased function pointer.
|
|
|
|
LLVMType::getInt8PtrTy(context),
|
|
|
|
// Number of inputs.
|
|
|
|
LLVMType::getIntNTy(context, 32),
|
|
|
|
// Number of outputs.
|
|
|
|
LLVMType::getIntNTy(context, 32),
|
|
|
|
});
|
2020-07-11 08:50:55 +08:00
|
|
|
}
|
|
|
|
|
2020-10-08 08:12:52 +08:00
|
|
|
// Get the LLVMType for refbackrt::ModuleDescriptor.
|
2020-08-28 06:09:10 +08:00
|
|
|
static LLVMType getModuleDescriptorTy(MLIRContext *context) {
|
|
|
|
return LLVMType::getStructTy(context,
|
|
|
|
{
|
|
|
|
// std::int32_t numFuncDescriptors;
|
|
|
|
LLVMType::getIntNTy(context, 32),
|
|
|
|
// FuncDescriptor *functionDescriptors;
|
|
|
|
getFuncDescriptorTy(context).getPointerTo(),
|
|
|
|
});
|
2020-07-11 08:50:55 +08:00
|
|
|
}
|
2020-07-11 08:31:24 +08:00
|
|
|
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Compiler runtime functions.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
template <typename T>
|
|
|
|
class TrivialCompilerRuntimeLowering : public OpConversionPattern<T> {
|
|
|
|
public:
|
|
|
|
TrivialCompilerRuntimeLowering(LLVM::LLVMFuncOp backingFunc)
|
|
|
|
: OpConversionPattern<T>(backingFunc.getContext()),
|
|
|
|
backingFunc(backingFunc) {}
|
|
|
|
LogicalResult
|
|
|
|
matchAndRewrite(T op, ArrayRef<Value> operands,
|
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
|
|
|
rewriter.replaceOpWithNewOp<LLVM::CallOp>(op, backingFunc, operands);
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
LLVM::LLVMFuncOp backingFunc;
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
2020-05-21 09:48:53 +08:00
|
|
|
namespace {
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
// FromMemrefOp requires special handling so that the unranked memref descriptor
|
|
|
|
// gets passed as two separate arguments instead of as a struct.
|
|
|
|
class FromMemrefOpCompilerRuntimeLowering
|
2020-10-08 08:12:52 +08:00
|
|
|
: public OpConversionPattern<refbackrt::FromMemrefOp> {
|
2020-05-21 09:48:53 +08:00
|
|
|
public:
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
FromMemrefOpCompilerRuntimeLowering(LLVM::LLVMFuncOp backingFunc)
|
2020-10-08 08:12:52 +08:00
|
|
|
: OpConversionPattern<refbackrt::FromMemrefOp>(backingFunc.getContext()),
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
backingFunc(backingFunc) {}
|
2020-05-21 09:48:53 +08:00
|
|
|
LogicalResult
|
2020-10-08 08:12:52 +08:00
|
|
|
matchAndRewrite(refbackrt::FromMemrefOp op, ArrayRef<Value> operands,
|
2020-05-21 09:48:53 +08:00
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
auto structVal = operands[0];
|
|
|
|
Value rank = rewriter.create<LLVM::ExtractValueOp>(
|
|
|
|
op.getLoc(),
|
|
|
|
structVal.getType().cast<LLVMType>().getStructElementType(0), structVal,
|
|
|
|
rewriter.getI32ArrayAttr({0}));
|
|
|
|
Value descriptorPtr = rewriter.create<LLVM::ExtractValueOp>(
|
|
|
|
op.getLoc(),
|
|
|
|
structVal.getType().cast<LLVMType>().getStructElementType(1), structVal,
|
|
|
|
rewriter.getI32ArrayAttr({1}));
|
|
|
|
rewriter.replaceOpWithNewOp<LLVM::CallOp>(
|
|
|
|
op, backingFunc, ValueRange({rank, descriptorPtr}));
|
2020-05-21 09:48:53 +08:00
|
|
|
return success();
|
|
|
|
}
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
LLVM::LLVMFuncOp backingFunc;
|
2020-05-21 09:48:53 +08:00
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
2020-09-17 08:31:40 +08:00
|
|
|
// Create an internal, constant LLVM global at the start of `module` holding
// `msg` as a NUL-terminated i8 array, and return it.
static LLVM::GlobalOp createGlobalString(ModuleOp module, StringAttr msg,
                                         OpBuilder &builder, Location loc) {
  // TODO: Deduplicate strings.
  std::string msgNulTerminated = msg.getValue().str();
  msgNulTerminated.push_back('\0');
  auto arrayTy = LLVMType::getArrayTy(LLVMType::getInt8Ty(module.getContext()),
                                      msgNulTerminated.size());
  OpBuilder::InsertionGuard guard(builder);
  builder.setInsertionPointToStart(module.getBody());

  // To get a unique symbol name, use a suffix derived from the current number
  // of ops in the module.
  // We can't use the SymbolTable's logic for this because the module
  // transiently contains a `func` and `llvm.func` with the same name during
  // conversion, preventing us from instantiating a SymbolTable.
  // Count the existing globals directly instead of materializing them into a
  // temporary SmallVector just to take its size.
  unsigned numGlobals = 0;
  for (auto global : module.getOps<LLVM::GlobalOp>()) {
    (void)global;
    ++numGlobals;
  }
  std::string symbolName =
      (Twine("__npcomp_string_") + Twine(numGlobals)).str();
  auto globalOp = builder.create<LLVM::GlobalOp>(
      loc, arrayTy, /*isConstant=*/true, LLVM::Linkage::Internal, symbolName,
      builder.getStringAttr(msgNulTerminated));
  return globalOp;
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
class AbortIfOpCompilerRuntimeLowering
|
2020-10-08 08:12:52 +08:00
|
|
|
: public OpConversionPattern<refbackrt::AbortIfOp> {
|
2020-09-17 08:31:40 +08:00
|
|
|
public:
|
|
|
|
AbortIfOpCompilerRuntimeLowering(LLVM::LLVMFuncOp backingFunc)
|
2020-10-08 08:12:52 +08:00
|
|
|
: OpConversionPattern<refbackrt::AbortIfOp>(backingFunc.getContext()),
|
2020-09-17 08:31:40 +08:00
|
|
|
backingFunc(backingFunc) {}
|
|
|
|
LogicalResult
|
2020-10-08 08:12:52 +08:00
|
|
|
matchAndRewrite(refbackrt::AbortIfOp op, ArrayRef<Value> operands,
|
2020-09-17 08:31:40 +08:00
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
2020-10-08 08:12:52 +08:00
|
|
|
refbackrt::AbortIfOp::Adaptor adaptor(operands);
|
2020-09-17 08:31:40 +08:00
|
|
|
auto *context = op.getContext();
|
|
|
|
|
|
|
|
// Create the global string, take its address, and gep to get an `i8*`.
|
|
|
|
auto globalOp = createGlobalString(op.getParentOfType<ModuleOp>(),
|
|
|
|
op.msgAttr(), rewriter, op.getLoc());
|
|
|
|
auto msgArray = rewriter.create<LLVM::AddressOfOp>(op.getLoc(), globalOp);
|
|
|
|
auto c0 = rewriter.create<LLVM::ConstantOp>(
|
|
|
|
op.getLoc(), LLVMType::getIntNTy(context, 32),
|
|
|
|
rewriter.getI32IntegerAttr(0));
|
|
|
|
auto msg = rewriter.create<LLVM::GEPOp>(op.getLoc(),
|
|
|
|
LLVMType::getInt8PtrTy(context),
|
|
|
|
msgArray, ValueRange({c0, c0}));
|
|
|
|
rewriter.replaceOpWithNewOp<LLVM::CallOp>(
|
|
|
|
op, backingFunc, ValueRange({adaptor.pred(), msg}));
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
LLVM::LLVMFuncOp backingFunc;
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
2020-10-08 08:12:52 +08:00
|
|
|
// Create the LLVM runtime function backing the refbackrt op with name `name`
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
// and requiring `type`.
|
|
|
|
static LLVMFuncOp createCompilerRuntimeFuncDecl(StringRef name, LLVMType type,
|
|
|
|
OpBuilder &builder,
|
|
|
|
Location loc) {
|
|
|
|
assert(type.isFunctionTy());
|
|
|
|
std::string symbolName = (Twine("__npcomp_compiler_rt_") + name).str();
|
|
|
|
return builder.create<LLVM::LLVMFuncOp>(loc, symbolName, type,
|
|
|
|
LLVM::Linkage::External);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void populateCompilerRuntimePatterns(ModuleOp module,
|
|
|
|
OwningRewritePatternList &patterns,
|
|
|
|
LLVMTypeConverter &typeConverter) {
|
2020-08-28 06:09:10 +08:00
|
|
|
auto *context = module.getContext();
|
2020-05-21 09:48:53 +08:00
|
|
|
OpBuilder builder(module.getBodyRegion());
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
|
|
|
|
{
|
2020-09-17 08:31:40 +08:00
|
|
|
auto abortIfFuncTy = LLVMType::getFunctionTy(
|
|
|
|
LLVMType::getVoidTy(context),
|
|
|
|
{LLVMType::getInt1Ty(context), LLVMType::getInt8PtrTy(context)},
|
|
|
|
/*isVarArg=*/false);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
LLVMFuncOp abortIfFunc = createCompilerRuntimeFuncDecl(
|
|
|
|
"abort_if", abortIfFuncTy, builder, module.getLoc());
|
2020-09-17 08:31:40 +08:00
|
|
|
patterns.insert<AbortIfOpCompilerRuntimeLowering>(abortIfFunc);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
auto convertFunctionType = [&](FunctionType type) {
|
|
|
|
TypeConverter::SignatureConversion conversion(type.getNumInputs());
|
|
|
|
return typeConverter.convertFunctionSignature(type, /*isVariadic=*/false,
|
|
|
|
conversion);
|
|
|
|
};
|
|
|
|
|
|
|
|
{
|
|
|
|
auto mlirFunctionType = builder.getFunctionType(
|
2020-10-08 08:12:52 +08:00
|
|
|
{builder.getType<refbackrt::TensorType>()},
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
{UnrankedMemRefType::get(builder.getF32Type(), /*memorySpace=*/0)});
|
|
|
|
LLVMType funcTy = convertFunctionType(mlirFunctionType);
|
|
|
|
LLVMFuncOp toMemrefFunc = createCompilerRuntimeFuncDecl(
|
|
|
|
"to_memref", funcTy, builder, module.getLoc());
|
2020-10-08 08:12:52 +08:00
|
|
|
patterns.insert<TrivialCompilerRuntimeLowering<refbackrt::ToMemrefOp>>(
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
toMemrefFunc);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// TODO: Pass in an element type enum, since the unranked memref descriptor
|
|
|
|
// doesn't know its own dtype.
|
|
|
|
auto mlirFunctionType = builder.getFunctionType(
|
|
|
|
{UnrankedMemRefType::get(builder.getF32Type(), /*memorySpace=*/0)},
|
2020-10-08 08:12:52 +08:00
|
|
|
{builder.getType<refbackrt::TensorType>()});
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
LLVMType funcTy = convertFunctionType(mlirFunctionType);
|
|
|
|
LLVMFuncOp fromMemrefFunc = createCompilerRuntimeFuncDecl(
|
|
|
|
"from_memref", funcTy, builder, module.getLoc());
|
|
|
|
patterns.insert<FromMemrefOpCompilerRuntimeLowering>(fromMemrefFunc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Lowering for module metadata
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
static LLVM::GlobalOp
|
2020-10-08 08:12:52 +08:00
|
|
|
createFuncDescriptorArray(ArrayRef<refbackrt::FuncMetadataOp> funcMetadatas,
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
OpBuilder &builder, Location loc) {
|
2020-08-28 06:09:10 +08:00
|
|
|
auto llvmI32Ty = LLVMType::getIntNTy(builder.getContext(), 32);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
|
|
|
|
DenseMap<StringRef, LLVM::GlobalOp> globalsByName;
|
|
|
|
for (auto funcMetadata : funcMetadatas) {
|
2020-08-28 06:09:10 +08:00
|
|
|
auto arrayTy =
|
|
|
|
LLVMType::getArrayTy(LLVMType::getInt8Ty(builder.getContext()),
|
|
|
|
funcMetadata.funcName().size());
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
std::string llvmSymbolName =
|
|
|
|
(Twine("__npcomp_internal_constant_") + funcMetadata.funcName()).str();
|
|
|
|
auto global = builder.create<LLVM::GlobalOp>(
|
|
|
|
loc, arrayTy, /*isConstant=*/true, LLVM::Linkage::Internal,
|
|
|
|
llvmSymbolName, builder.getStringAttr(funcMetadata.funcName()));
|
|
|
|
globalsByName[funcMetadata.funcName()] = global;
|
|
|
|
}
|
|
|
|
|
|
|
|
// This must match FuncDescriptor in the runtime.
|
2020-08-28 06:09:10 +08:00
|
|
|
auto funcDescriptorTy = getFuncDescriptorTy(builder.getContext());
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
auto funcDescriptorArrayTy =
|
|
|
|
LLVMType::getArrayTy(funcDescriptorTy, funcMetadatas.size());
|
|
|
|
auto funcDescriptorArrayGlobal = builder.create<LLVM::GlobalOp>(
|
|
|
|
loc, funcDescriptorArrayTy, /*isConstant=*/true, LLVM::Linkage::Internal,
|
|
|
|
"__npcomp_func_descriptors",
|
|
|
|
/*value=*/Attribute());
|
|
|
|
OpBuilder::InsertionGuard guard(builder);
|
|
|
|
builder.createBlock(&funcDescriptorArrayGlobal.initializer());
|
|
|
|
|
|
|
|
// Build the initializer.
|
|
|
|
Value funcDescriptorArray =
|
|
|
|
builder.create<LLVM::UndefOp>(loc, funcDescriptorArrayTy);
|
|
|
|
auto updateDescriptor = [&](Value value,
|
|
|
|
std::initializer_list<int32_t> position) {
|
|
|
|
funcDescriptorArray = builder.create<LLVM::InsertValueOp>(
|
|
|
|
loc, funcDescriptorArray, value,
|
|
|
|
/*position=*/builder.getI32ArrayAttr(position));
|
|
|
|
};
|
|
|
|
auto updateDescriptorWithI32Attr =
|
|
|
|
[&](Attribute attr, std::initializer_list<int32_t> position) {
|
|
|
|
auto constant = builder.create<LLVM::ConstantOp>(loc, llvmI32Ty, attr);
|
|
|
|
updateDescriptor(constant, position);
|
|
|
|
};
|
|
|
|
auto c0 = builder.create<LLVM::ConstantOp>(loc, llvmI32Ty,
|
|
|
|
builder.getI32IntegerAttr(0));
|
|
|
|
for (auto funcMetadataAndIndex : llvm::enumerate(funcMetadatas)) {
|
|
|
|
auto funcMetadata = funcMetadataAndIndex.value();
|
|
|
|
int32_t index = funcMetadataAndIndex.index();
|
|
|
|
|
|
|
|
// Name length.
|
|
|
|
updateDescriptorWithI32Attr(
|
|
|
|
builder.getI32IntegerAttr(funcMetadata.funcName().size()), {index, 0});
|
|
|
|
|
|
|
|
// Name chars.
|
|
|
|
auto funcNameArray = builder.create<LLVM::AddressOfOp>(
|
|
|
|
loc, globalsByName[funcMetadata.funcName()]);
|
2020-08-28 06:09:10 +08:00
|
|
|
auto funcNamePtr = builder.create<LLVM::GEPOp>(
|
|
|
|
loc, LLVMType::getInt8PtrTy(builder.getContext()), funcNameArray,
|
|
|
|
ValueRange({c0, c0}));
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
updateDescriptor(funcNamePtr, {index, 1});
|
|
|
|
|
|
|
|
// Function pointer.
|
|
|
|
//
|
|
|
|
// We create this reference to the original function (and use a dummy i8*
|
|
|
|
// type). We will fix this up after conversion to point at wrapper
|
|
|
|
// functions that satisfy the ABI requirements.
|
|
|
|
// The bitcast is required so that after conversion the inserted value is an
|
|
|
|
// i8* as expected by the descriptor struct.
|
|
|
|
auto funcAddress = builder.create<LLVM::AddressOfOp>(
|
2020-08-28 06:09:10 +08:00
|
|
|
loc, LLVMType::getInt8PtrTy(builder.getContext()),
|
|
|
|
funcMetadata.funcName());
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
auto typeErasedFuncAddress = builder.create<LLVM::BitcastOp>(
|
2020-08-28 06:09:10 +08:00
|
|
|
loc, LLVMType::getInt8PtrTy(builder.getContext()), funcAddress);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
updateDescriptor(typeErasedFuncAddress, {index, 2});
|
|
|
|
|
|
|
|
// Number of inputs.
|
|
|
|
updateDescriptorWithI32Attr(funcMetadata.numInputsAttr(), {index, 3});
|
|
|
|
|
|
|
|
// Number of outputs.
|
|
|
|
updateDescriptorWithI32Attr(funcMetadata.numOutputsAttr(), {index, 4});
|
|
|
|
}
|
|
|
|
|
|
|
|
builder.create<LLVM::ReturnOp>(loc, funcDescriptorArray);
|
|
|
|
|
|
|
|
return funcDescriptorArrayGlobal;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Creates the single module descriptor global that the npcomp runtime keys
// all interactions off of (function lookup, argument verification,
// dispatching). Field {0} is the number of function descriptors and field
// {1} is a raw pointer to the descriptor array; this layout must match the
// runtime's ModuleDescriptor struct.
//
// `funcDescriptorArray` is the global produced by createFuncDescriptorArray.
// Returns the newly created module descriptor global.
LLVM::GlobalOp createModuleDescriptor(LLVM::GlobalOp funcDescriptorArray,
                                      OpBuilder &builder, Location loc) {
  auto llvmI32Ty = LLVMType::getIntNTy(builder.getContext(), 32);
  auto moduleDescriptorTy = getModuleDescriptorTy(builder.getContext());
  // TODO: Ideally this symbol name would somehow be related to the module
  // name, if we could consistently assume we had one.
  // TODO: We prepend _mlir so that mlir::ExecutionEngine's lookup logic (which
  // is typically only meant for function pointers) will find this raw symbol.
  auto moduleDescriptorGlobal = builder.create<LLVM::GlobalOp>(
      loc, moduleDescriptorTy, /*isConstant=*/true, LLVM::Linkage::External,
      "_mlir___npcomp_module_descriptor",
      /*value=*/Attribute());
  OpBuilder::InsertionGuard guard(builder);
  builder.createBlock(&moduleDescriptorGlobal.initializer());

  Value moduleDescriptor =
      builder.create<LLVM::UndefOp>(loc, moduleDescriptorTy);

  auto updateDescriptor = [&](Value value,
                              std::initializer_list<int32_t> position) {
    moduleDescriptor = builder.create<LLVM::InsertValueOp>(
        loc, moduleDescriptor, value,
        /*position=*/builder.getI32ArrayAttr(position));
  };

  // Field {0}: number of function descriptors.
  updateDescriptor(
      builder.create<LLVM::ConstantOp>(
          loc, llvmI32Ty,
          builder.getI32IntegerAttr(
              funcDescriptorArray.getType().getArrayNumElements())),
      {0});

  // Field {1}: pointer to the first function descriptor. The bitcast decays
  // the [N x FuncDescriptor]* address to a plain FuncDescriptor*.
  auto funcDescriptorArrayAddress =
      builder.create<LLVM::AddressOfOp>(loc, funcDescriptorArray);
  auto rawFuncDescriptorPtr = builder.create<LLVM::BitcastOp>(
      loc, getFuncDescriptorTy(builder.getContext()).getPointerTo(),
      funcDescriptorArrayAddress);
  updateDescriptor(rawFuncDescriptorPtr, {1});
  builder.create<LLVM::ReturnOp>(loc, moduleDescriptor);

  return moduleDescriptorGlobal;
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
class LowerModuleMetadata
|
2020-10-08 08:12:52 +08:00
|
|
|
: public OpConversionPattern<refbackrt::ModuleMetadataOp> {
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
public:
|
|
|
|
using OpConversionPattern::OpConversionPattern;
|
|
|
|
LogicalResult
|
2020-10-08 08:12:52 +08:00
|
|
|
matchAndRewrite(refbackrt::ModuleMetadataOp op, ArrayRef<Value> operands,
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
ConversionPatternRewriter &rewriter) const override {
|
|
|
|
auto funcMetadatas =
|
2020-10-08 08:12:52 +08:00
|
|
|
llvm::to_vector<6>(op.metadatas().getOps<refbackrt::FuncMetadataOp>());
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
auto funcDescriptorArray =
|
|
|
|
createFuncDescriptorArray(funcMetadatas, rewriter, op.getLoc());
|
|
|
|
auto moduleDescriptor =
|
|
|
|
createModuleDescriptor(funcDescriptorArray, rewriter, op.getLoc());
|
|
|
|
|
|
|
|
// TODO: create get module descriptor wrapper (or upgrade
|
|
|
|
// mlir::ExecutionEngine to allow raw symbol lookup)
|
|
|
|
(void)moduleDescriptor;
|
|
|
|
|
|
|
|
rewriter.eraseOp(op);
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Performs the calculation:
|
|
|
|
// ```
|
|
|
|
// ty *f(void **voidStarStar, int32_t i) {
|
|
|
|
// return reinterpret_cast<ty *>(voidStarStar[i]);
|
|
|
|
// }
|
|
|
|
// ```
|
|
|
|
static Value getTypedAddressFromVoidStarStar(Value voidStarStar, int32_t index,
|
|
|
|
LLVMType ty, OpBuilder &builder,
|
|
|
|
Location loc) {
|
|
|
|
Value ci = builder.create<LLVM::ConstantOp>(
|
2020-08-28 06:09:10 +08:00
|
|
|
loc, LLVMType::getIntNTy(builder.getContext(), 32),
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
builder.getI32IntegerAttr(index));
|
|
|
|
auto inputPtr = builder.create<LLVM::GEPOp>(
|
2020-08-28 06:09:10 +08:00
|
|
|
loc, LLVMType::getInt8PtrTy(builder.getContext()), voidStarStar,
|
|
|
|
ValueRange(ci));
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
return builder.create<LLVM::BitcastOp>(loc, ty.getPointerTo(), inputPtr);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Loads the wrapped function's arguments from the void** passed across the
// ABI boundary: each slot is cast to the corresponding parameter type and
// dereferenced.
static SmallVector<Value, 6> loadCallArgs(Value inputsPtrPtr, LLVMType funcTy,
                                          OpBuilder &builder, Location loc) {
  SmallVector<Value, 6> loadedArgs;
  for (int argIdx = 0, numParams = funcTy.getFunctionNumParams();
       argIdx < numParams; argIdx++) {
    auto paramTy = funcTy.getFunctionParamType(argIdx);
    // Typed address of the argIdx'th void* slot.
    auto typedAddr = getTypedAddressFromVoidStarStar(inputsPtrPtr, argIdx,
                                                     paramTy, builder, loc);
    loadedArgs.push_back(builder.create<LLVM::LoadOp>(loc, typedAddr));
  }
  return loadedArgs;
}
|
|
|
|
|
|
|
|
// Writes out the logical results of the wrapper function through the void**
|
|
|
|
// passed on the ABI boundary. Because LLVM (and hence llvm.func)
|
|
|
|
// only supports a single return type (or void/no results), the logic here needs
|
|
|
|
// to be aware of the convention used in the Std to LLVM conversion to map
|
|
|
|
// multiple return types. The details of this are in the function
|
|
|
|
// packFunctionResults and its callers:
|
|
|
|
// https://github.com/llvm/llvm-project/blob/fad9cba8f58ba9979f390a49cf174ec9fcec29a6/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp#L282
|
|
|
|
static void storeWrapperResults(LLVM::CallOp callToWrapped, Value resultsPtrPtr,
|
|
|
|
OpBuilder &builder, Location loc) {
|
|
|
|
// 0 results. Nothing to do.
|
|
|
|
if (callToWrapped.getNumResults() == 0)
|
|
|
|
return;
|
|
|
|
Value result = callToWrapped.getResult(0);
|
|
|
|
auto ty = result.getType().cast<LLVMType>();
|
|
|
|
// 1 logical result.
|
|
|
|
if (!ty.isStructTy()) {
|
|
|
|
Value addr =
|
|
|
|
getTypedAddressFromVoidStarStar(resultsPtrPtr, 0, ty, builder, loc);
|
|
|
|
builder.create<LLVM::StoreOp>(loc, result, addr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// >=2 logical results.
|
|
|
|
for (int i = 0, e = ty.getStructNumElements(); i < e; i++) {
|
|
|
|
auto elementTy = ty.getStructElementType(i);
|
|
|
|
Value addr = getTypedAddressFromVoidStarStar(resultsPtrPtr, i, elementTy,
|
|
|
|
builder, loc);
|
|
|
|
int32_t i32I = i;
|
|
|
|
Value value = builder.create<LLVM::ExtractValueOp>(
|
|
|
|
loc, elementTy, result, builder.getI32ArrayAttr({i32I}));
|
|
|
|
builder.create<LLVM::StoreOp>(loc, value, addr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Construct a wrapper function.
|
|
|
|
// For an externally visible function f(T1, T2) -> T3, T4, we create a
|
|
|
|
// wrapper
|
2020-10-08 08:12:52 +08:00
|
|
|
// __refbackrt_wrapper_f(void **inputs, void ** outputs) {
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
// T3 t3;
|
|
|
|
// T4 t4;
|
|
|
|
// (t3, t4) = f(*cast<T1*>(inputs[0]), *cast<T2*>(inputs[1]));
|
|
|
|
// *cast<T3*>(outputs[0]) = t3;
|
|
|
|
// *cast<T4*>(outputs[1]) = t4;
|
|
|
|
// }
|
|
|
|
// This is very similar to MLIR's "packed" convention, but supporting
|
|
|
|
// outputs.
|
|
|
|
// TODO: Extend MLIR's void** wrappers to have outputs in this way.
|
|
|
|
static LLVMFuncOp createWrapperFunc(LLVMFuncOp func) {
|
2020-08-28 06:09:10 +08:00
|
|
|
auto *context = func.getContext();
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
LLVMType funcTy = func.getType();
|
2020-08-28 06:09:10 +08:00
|
|
|
auto voidStarTy = LLVMType::getInt8PtrTy(context);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
auto voidStarStarTy = voidStarTy.getPointerTo();
|
2020-08-28 06:09:10 +08:00
|
|
|
auto wrapperTy = LLVMType::getFunctionTy(LLVMType::getVoidTy(context),
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
{voidStarStarTy, voidStarStarTy},
|
|
|
|
/*isVarArg=*/false);
|
2020-10-08 08:12:52 +08:00
|
|
|
constexpr char kRefbackrtWrapperPrefix[] = "__refbackrt_wrapper_";
|
|
|
|
auto wrapperName = (Twine(kRefbackrtWrapperPrefix) + func.getName()).str();
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
OpBuilder moduleBuilder(func.getParentRegion());
|
|
|
|
LLVMFuncOp wrapper = moduleBuilder.create<LLVMFuncOp>(
|
|
|
|
func.getLoc(), wrapperName, wrapperTy, LLVM::Linkage::External);
|
|
|
|
|
|
|
|
// Create the function body.
|
|
|
|
Block &body = *wrapper.addEntryBlock();
|
|
|
|
auto builder = OpBuilder::atBlockBegin(&body);
|
|
|
|
auto callArgs =
|
|
|
|
loadCallArgs(body.getArgument(0), funcTy, builder, func.getLoc());
|
|
|
|
auto call = builder.create<LLVM::CallOp>(func.getLoc(), func, callArgs);
|
|
|
|
storeWrapperResults(call, body.getArgument(1), builder, func.getLoc());
|
|
|
|
builder.create<LLVM::ReturnOp>(func.getLoc(), ValueRange());
|
|
|
|
return wrapper;
|
2020-05-21 09:48:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
class LowerToLLVM : public LowerToLLVMBase<LowerToLLVM> {
|
2020-09-10 15:23:46 +08:00
|
|
|
void getDependentDialects(DialectRegistry ®istry) const override {
|
|
|
|
registry.insert<LLVM::LLVMDialect>();
|
|
|
|
}
|
|
|
|
|
|
|
|
void runOnOperation() override {
|
2020-05-21 09:48:53 +08:00
|
|
|
auto module = getOperation();
|
|
|
|
auto *context = &getContext();
|
|
|
|
|
|
|
|
LLVMTypeConverter converter(context);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
|
2020-10-08 08:12:52 +08:00
|
|
|
// refbackrt::TensorType is passed as a `void*` in the ABI.
|
|
|
|
converter.addConversion([&](refbackrt::TensorType type) {
|
2020-08-28 06:09:10 +08:00
|
|
|
return LLVMType::getInt8PtrTy(context);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
});
|
|
|
|
|
2020-05-21 09:48:53 +08:00
|
|
|
OwningRewritePatternList patterns;
|
|
|
|
LLVMConversionTarget target(*context);
|
|
|
|
target.addDynamicallyLegalOp<FuncOp>(
|
|
|
|
[&](FuncOp op) { return converter.isSignatureLegal(op.getType()); });
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
populateCompilerRuntimePatterns(module, patterns, converter);
|
2020-05-21 09:48:53 +08:00
|
|
|
target.addLegalOp<ModuleOp, ModuleTerminatorOp>();
|
|
|
|
populateStdToLLVMConversionPatterns(converter, patterns);
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall features of npcomprt is that it exposes a module that
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
patterns.insert<LowerModuleMetadata>(context);
|
2020-05-21 09:48:53 +08:00
|
|
|
|
2020-09-25 08:14:21 +08:00
|
|
|
// TODO: Move these "std to std" legalizations to their own pass if we grow
|
|
|
|
// lots of these patterns.
|
|
|
|
populateExpandTanhPattern(patterns, context);
|
|
|
|
|
2020-10-30 06:25:55 +08:00
|
|
|
if (failed(applyFullConversion(module, target, std::move(patterns)))) {
|
2020-05-21 09:48:53 +08:00
|
|
|
return signalPassFailure();
|
|
|
|
}
|
Rework e2e flow to use new "npcomprt"
This ~totally reworks the existing "runtime" stuff to be more
principled and usable, such as from Python. It's still not fully
production-quality, mainly in the department of memory management (e.g.
it currently leaks memory; we need to figure out "who frees memrefs" +
the analysis and transformation needed to do that (maybe use upstream
buffer allocation pass?)).
The user API is in include/npcomp/runtime/UserAPI.h, though
include/npcomp/JITRuntime/JITModule.h is a friendlier wrapper.
The stuff under {include,lib}/runtime is totally firewalled from the
compiler and tiny (<6kB, though no attention has gone into optimizing
that size). For example, we don't link in libSupport into the runtime,
instead having our own bare bones replacements for basics like ArrayRef
(the JITRuntime helps with bridging that gap, since it *can* depend on
all common LLVM utilities).
The overall feature of npcomprt is that it exposes a module
with multiple function entry points. Each function has arguments and
results that are tensor-valued, and npcomprt::Tensor is the runtime type
that is used to interact with that (and a npcomprt::Ref<T>
reference-counting wrapper is provided to wrap npcomprt::Tensor in the
common case).
From an implementation perspective, an npcomprt module at the
LLVM/object/binary level exposes a single module descriptor struct that
has pointers to other metadata (currently just a list of function
metadata descriptors). All interactions with the npcomp runtime are
keyed off of that module descriptor, including function lookups and
dispatching. This is done to dodge platform ABI issues and also allow
enough reflection to e.g. verify provided arguments.
Most of the compiler-side work here was in LowerToNpcomprtABI and
LowerToLLVM.
Also,
- Rename npcomp_rt/NpcompRt to npcomprt/Npcomprt; it was getting
annoying to type the underscores/caps.
- misc improvements to bash_helpers.sh
2020-07-09 08:15:40 +08:00
|
|
|
// Rewrite llvm.mlir.addressof ops that reference the original exported
|
|
|
|
// functions from the module to instead refer to wrapper functions.
|
|
|
|
// These wrapper functions have a fixed ABI
|
|
|
|
// (`void f(void **inputs, void **results)`) which we can interface to from
|
|
|
|
// external code without dealing with platform-dependent
|
|
|
|
// register-level calling conventions. We embed enough information in the
|
|
|
|
// module metadata to make sure that calling code can e.g. preallocate
|
|
|
|
// enough outputs and with the right types to safely funnel through this
|
|
|
|
// convention.
|
|
|
|
module.walk([&](LLVM::AddressOfOp op) {
|
|
|
|
auto originalFunc =
|
|
|
|
module.lookupSymbol<LLVM::LLVMFuncOp>(op.global_name());
|
|
|
|
if (!originalFunc)
|
|
|
|
return;
|
|
|
|
auto wrapper = createWrapperFunc(originalFunc);
|
|
|
|
op.getResult().setType(wrapper.getType().getPointerTo());
|
|
|
|
Builder builder(op.getContext());
|
|
|
|
op.setAttr("global_name", builder.getSymbolRefAttr(wrapper.getName()));
|
|
|
|
});
|
2020-05-21 09:48:53 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
/// Factory for the LowerToLLVM pass.
///
/// Returns an owning pointer to a fresh pass instance; the pass runs on
/// ModuleOp and lowers the module to the LLVM dialect.
std::unique_ptr<OperationPass<ModuleOp>>
mlir::NPCOMP::createLowerToLLVMPass() {
  return std::make_unique<LowerToLLVM>();
}
|