//===- npcomp-opt.cpp -------------------------------------------*- C++ -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
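
// Illustrative invocations (the options below follow the mlir-opt driver;
// registered pass and pipeline flags can be appended in the usual way):
//   npcomp-opt input.mlir -o output.mlir
//   npcomp-opt test.mlir -split-input-file -verify-diagnostics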

#include "mlir/IR/AsmState.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllPasses.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/MlirOptMain.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/ToolOutputFile.h"

#include "npcomp/InitAll.h"
#include "torch-mlir/InitAll.h"

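// Command-line options recognized by this driver; pass and pass-pipeline
// options are registered separately in main().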
static llvm::cl::opt<std::string> inputFilename(llvm::cl::Positional,
                                                llvm::cl::desc("<input file>"),
                                                llvm::cl::init("-"));

static llvm::cl::opt<std::string>
    outputFilename("o", llvm::cl::desc("Output filename"),
                   llvm::cl::value_desc("filename"), llvm::cl::init("-"));

static llvm::cl::opt<bool> splitInputFile(
    "split-input-file",
    llvm::cl::desc("Split the input file into pieces and process each "
                   "chunk independently"),
    llvm::cl::init(false));

static llvm::cl::opt<bool> verifyDiagnostics(
    "verify-diagnostics",
    llvm::cl::desc("Check that emitted diagnostics match "
                   "expected-* lines on the corresponding line"),
    llvm::cl::init(false));

static llvm::cl::opt<bool> verifyPasses(
    "verify-each",
    llvm::cl::desc("Run the verifier after each transformation pass"),
    llvm::cl::init(true));

static llvm::cl::opt<bool> allowUnregisteredDialects(
    "allow-unregistered-dialect",
    llvm::cl::desc("Allow operation with no registered dialects"),
    llvm::cl::init(false));

int main(int argc, char **argv) {
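  // The registry collects every dialect that the driver makes available to
  // the parser and the pass pipeline.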
  mlir::DialectRegistry registry;

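  // Register global command-line options for the AsmPrinter and MLIRContext.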
  mlir::registerAsmPrinterCLOptions();
  mlir::registerMLIRContextCLOptions();

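  // Make the upstream MLIR, npcomp, and torch-mlir dialects and passes
  // available to the driver.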
  mlir::registerAllDialects(registry);
  mlir::registerAllPasses();

  mlir::NPCOMP::registerAllDialects(registry);
  mlir::NPCOMP::registerAllPasses();

  mlir::torch::registerAllDialects(registry);
  mlir::torch::registerAllPasses();

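  // Initialize LLVM (signal handlers for stack traces, cleanup at exit).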
  llvm::InitLLVM y(argc, argv);

  // Register any pass manager command line options.
  mlir::registerPassManagerCLOptions();
  mlir::PassPipelineCLParser passPipeline("", "Compiler passes to run");

  // Parse pass names in main to ensure static initialization completed.
  llvm::cl::ParseCommandLineOptions(argc, argv,
                                    "MLIR modular optimizer driver\n");

  // Set up the input file.
  std::string errorMessage;
  auto file = mlir::openInputFile(inputFilename, &errorMessage);
  if (!file) {
    llvm::errs() << errorMessage << "\n";
    return 1;
  }

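  // Set up the output file.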
  auto output = mlir::openOutputFile(outputFilename, &errorMessage);
  if (!output) {
    llvm::errs() << errorMessage << "\n";
    exit(1);
  }

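  // Run the mlir-opt-style driver: process the (possibly split) input and
  // stream the result to the output file.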
  if (failed(MlirOptMain(output->os(), std::move(file), passPipeline, registry,
                         splitInputFile, verifyDiagnostics, verifyPasses,
                         allowUnregisteredDialects))) {
    return 1;
  }

  // Keep the output file if the invocation of MlirOptMain was successful.
  output->keep();
  return 0;
}