Bump llvm-project to c68d2895a1f4019b387c69d1e5eec31b0eb5e7b0

- dialect registration
- StringAttr::get: order of context arg
- math dialect
- LogicalResult nodiscard
- error message for invalid broadcast
pull/168/head
Sean Silva 2021-02-22 12:08:17 -08:00
parent 8486968925
commit c424c24ed8
19 changed files with 42 additions and 27 deletions

View File

@ -21,6 +21,7 @@ cd $td/build
ninja
ninja check-npcomp
ninja check-frontends-pytorch
echo
echo "========"

@ -1 +1 @@
Subproject commit 46e764a628da81795af3f64bd28970b7bd4115d6
Subproject commit c68d2895a1f4019b387c69d1e5eec31b0eb5e7b0

View File

@ -14,9 +14,11 @@
#include "npcomp/InitAll.h"
/// Registers all NPCOMP dialects with the given context and eagerly loads
/// them so they are immediately usable by clients of the C API.
///
/// Note: after LLVM c68d2895, `MLIRContext::getDialectRegistry()` no longer
/// returns a mutable registry; registration now goes through a local
/// `DialectRegistry` appended via `appendDialectRegistry`.
void npcompRegisterAllDialects(MlirContext context) {
  // Collect all NPCOMP dialect registrations into a standalone registry.
  mlir::DialectRegistry registry;
  mlir::NPCOMP::registerAllDialects(registry);
  // Merge the registrations into the context's registry.
  unwrap(context)->appendDialectRegistry(registry);
  // TODO: Don't eagerly load once D88162 is in and clients can do this.
  unwrap(context)->loadAllAvailableDialects();
}
void npcompRegisterAllPasses() {

View File

@ -123,7 +123,7 @@ MlirType npcompSlotObjectTypeGet(MlirContext context, MlirStringRef className,
intptr_t slotTypeCount,
const MlirType *slotTypes) {
MLIRContext *cppContext = unwrap(context);
auto classNameAttr = StringAttr::get(unwrap(className), cppContext);
auto classNameAttr = StringAttr::get(cppContext, unwrap(className));
SmallVector<Type> slotTypesCpp;
slotTypesCpp.resize(slotTypeCount);
for (intptr_t i = 0; i < slotTypeCount; ++i) {

View File

@ -9,6 +9,7 @@
#include "npcomp/Conversion/TCFToStd/TCFToStd.h"
#include "../PassDetail.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Traits.h"
@ -108,9 +109,9 @@ public:
static LogicalResult
matchAndRewriteUnaryElementwise(Operation *op, PatternRewriter &rewriter) {
if (isa<tcf::ExpOp>(op)) {
rewriter.replaceOpWithNewOp<ExpOp>(op, op->getOperand(0));
rewriter.replaceOpWithNewOp<math::ExpOp>(op, op->getOperand(0));
} else if (isa<tcf::TanhOp>(op)) {
rewriter.replaceOpWithNewOp<TanhOp>(op, op->getOperand(0));
rewriter.replaceOpWithNewOp<math::TanhOp>(op, op->getOperand(0));
} else {
op->dump();
llvm::report_fatal_error(

View File

@ -81,7 +81,7 @@ public:
LLVM_DEBUG(llvm::dbgs()
<< "generated layer_name: '" << layerName << "'\n");
auto attr = StringAttr::get(layerName, module.getContext());
auto attr = StringAttr::get(module.getContext(), layerName);
op->setAttr(StringRef("layer_name"), attr);
});
}

View File

@ -114,7 +114,7 @@ Type BasicpyDialect::parseType(DialectAsmParser &parser) const {
}
if (parser.parseGreater())
return Type();
return SlotObjectType::get(StringAttr::get(className, getContext()),
return SlotObjectType::get(StringAttr::get(getContext(), className),
slotTypes);
}
if (keyword == "StrType")

View File

@ -441,7 +441,7 @@ public:
TypeEquations equations;
TypeEquationPopulator p(equations);
p.runOnFunction(func);
(void)p.runOnFunction(func);
LLVM_DEBUG(equations.report(llvm::dbgs()));
TypeUnifier unifier;

View File

@ -182,8 +182,9 @@ ObjectGraphGlobalizer::recursivelyTraverseClassType(ClassTypeOp classType) {
for (auto attr : classType.getOps<AttrOp>()) {
nameStack.push_back(attr.name().str());
if (auto type = attr.type().dyn_cast<NnModuleType>()) {
recursivelyTraverseClassType(
symbolTable.lookup<ClassTypeOp>(type.getClassName()));
if (failed(recursivelyTraverseClassType(
symbolTable.lookup<ClassTypeOp>(type.getClassName()))))
return failure();
} else {
auto linkageName = llvm::join(nameStack, ".");
auto globalSlot = globalBuilder.create<GlobalSlotOp>(
@ -200,7 +201,8 @@ ObjectGraphGlobalizer::recursivelyTraverseClassType(ClassTypeOp classType) {
auto linkageName = llvm::join(nameStack, ".");
nameStack.pop_back();
if (!methodLinkageNames.insert({method.function(), linkageName}).second)
method.emitError() << "unbound function shared by multiple methods";
return method.emitError()
<< "unbound function shared by multiple methods";
}
return success();
}

View File

@ -8,6 +8,8 @@
#include "npcomp/InitAll.h"
#include "mlir/IR/Dialect.h"
#include "npcomp/Conversion/Passes.h"
#include "npcomp/Dialect/ATen/IR/ATenDialect.h"
#include "npcomp/Dialect/ATen/Transforms/Passes.h"
#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h"
@ -22,10 +24,8 @@
#include "npcomp/Dialect/TCP/Transforms/Passes.h"
#include "npcomp/Dialect/Torch/IR/TorchDialect.h"
#include "npcomp/Dialect/Torch/Transforms/Passes.h"
#include "npcomp/Typing/Transforms/Passes.h"
#include "npcomp/Conversion/Passes.h"
#include "npcomp/RefBackend/RefBackend.h"
#include "npcomp/Typing/Transforms/Passes.h"
void mlir::NPCOMP::registerAllDialects(mlir::DialectRegistry &registry) {
// clang-format off

View File

@ -9,6 +9,7 @@
#include "npcomp/RefBackend/JITHelpers/JITModule.h"
#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/Target/LLVMIR.h"
#include "npcomp/RefBackend/RefBackend.h"
using namespace refback;
@ -36,6 +37,9 @@ void JITModule::buildBackendCompilationPipeline(PassManager &pm,
llvm::Expected<std::unique_ptr<JITModule>>
JITModule::fromCompiledModule(mlir::ModuleOp module,
llvm::ArrayRef<llvm::StringRef> sharedLibs) {
// Ensure LLVM Dialect -> LLVM IR translations are available.
mlir::registerLLVMDialectTranslation(*module->getContext());
// Build the JITModule.
auto expectedEngine = ExecutionEngine::create(
module, /*llvmModuleBuilder=*/nullptr,
/*transformer=*/[](llvm::Module *) { return Error::success(); },

View File

@ -12,6 +12,7 @@
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Math/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Transforms/DialectConversion.h"

View File

@ -179,7 +179,7 @@ struct RestrictedCanonicalizer
op->getCanonicalizationPatterns(patterns, context);
Operation *op = getOperation();
applyPatternsAndFoldGreedily(op->getRegions(), std::move(patterns));
(void)applyPatternsAndFoldGreedily(op->getRegions(), std::move(patterns));
}
};
} // end anonymous namespace

View File

@ -82,11 +82,12 @@ void PropagationWorklist::propagateTransitivity() {
ValueType *
GreedyTypeNodeVarResolver::unionCandidateTypes(const ValueTypeSet &candidates) {
if (candidates.empty()) {
mlir::emitOptionalError(loc, "no candidate types were identified");
(void)mlir::emitOptionalError(loc, "no candidate types were identified");
return nullptr;
}
if (candidates.size() != 1) {
mlir::emitOptionalError(loc, "ambiguous candidate types were identified");
(void)mlir::emitOptionalError(loc,
"ambiguous candidate types were identified");
return nullptr;
}

View File

@ -73,7 +73,8 @@ mlir::Type TypeNode::constructIrType(Context &context,
const TypeVarMap &mapping,
MLIRContext *mlirContext,
llvm::Optional<Location> loc) {
mlir::emitOptionalError(loc, "base class cannot construct concrete types");
(void)mlir::emitOptionalError(loc,
"base class cannot construct concrete types");
return {};
}

View File

@ -242,7 +242,7 @@ public:
auto &env = cpaContext.getCurrentEnvironment();
InitialConstraintGenerator p(env);
p.runOnFunction(func);
(void)p.runOnFunction(func);
CPA::PropagationWorklist prop(env);
do {

View File

@ -2,7 +2,7 @@
// CHECK-LABEL: func @unary_ops(
// CHECK-SAME: %[[ARG:.*]]: tensor<?xf32>) -> tensor<?xf32> {
// CHECK: %[[RET:.*]] = exp %[[ARG]] : tensor<?xf32>
// CHECK: %[[RET:.*]] = math.exp %[[ARG]] : tensor<?xf32>
// CHECK: return %[[RET]] : tensor<?xf32>
// CHECK: }
func @unary_ops(%arg0: tensor<?xf32>) -> tensor<?xf32> {

View File

@ -5,7 +5,7 @@
// RUN: -shared-libs=%npcomp_runtime_shlib 2>&1 \
// RUN: | FileCheck %s
// CHECK: NPCOMP: aborting: invalid broadcast
// CHECK: NPCOMP: aborting: required broadcastable shapes
func @invalid_broadcast(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%0 = tcf.add %arg0, %arg1 : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>

View File

@ -11,16 +11,17 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/InitLLVM.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AsmState.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllPasses.h"
#include "mlir/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR.h"
#include "npcomp-c/InitLLVM.h"
#include "npcomp/InitAll.h"
#include "npcomp/RefBackend/JITHelpers/JITModule.h"
#include "llvm/Support/InitLLVM.h"
using namespace mlir;
using llvm::Error;
@ -119,11 +120,9 @@ static void printOutputs(ArrayRef<refbackrt::Ref<refbackrt::Tensor>> outputs,
}
}
Error compileAndRun(std::string mlirFile, mlir::DialectRegistry &registry,
Error compileAndRun(std::string mlirFile, mlir::MLIRContext &context,
std::string invokeFunction, ArrayRef<StringRef> argValues,
ArrayRef<StringRef> sharedLibs, bool optimize) {
MLIRContext context;
registry.loadAll(&context);
OwningModuleRef moduleRef = parseSourceFile(mlirFile, &context);
if (!moduleRef)
return make_string_error(Twine("could not open ") + mlirFile);
@ -186,6 +185,9 @@ int main(int argc, char **argv) {
mlir::registerAllPasses();
mlir::NPCOMP::registerAllDialects(registry);
mlir::NPCOMP::registerAllPasses();
MLIRContext context;
context.appendDialectRegistry(registry);
context.loadAllAvailableDialects();
llvm::InitLLVM y(argc, argv);
npcompInitializeLLVMCodegen();
@ -200,7 +202,7 @@ int main(int argc, char **argv) {
SmallVector<StringRef, 6> argValues(options.argValues.begin(),
options.argValues.end());
Error error =
compileAndRun(options.inputFile, registry, options.invokeFunction,
compileAndRun(options.inputFile, context, options.invokeFunction,
argValues, sharedLibs, options.optimize);
int exitCode = EXIT_SUCCESS;