//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "torch-mlir/Conversion/TorchToSCF/TorchToSCF.h"

#include "../PassDetail.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Transforms/DialectConversion.h"
#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "torch-mlir/Dialect/Torch/IR/TorchTypes.h"
#include "torch-mlir/Dialect/TorchConversion/IR/TorchConversionDialect.h"
#include "torch-mlir/Dialect/TorchConversion/Transforms/BackendTypeConversion.h"

using namespace mlir;
using namespace mlir::torch;
using namespace mlir::torch::Torch;

namespace {
class ConvertTorchPrimIfYieldOp : public OpConversionPattern<PrimIfYieldOp> {
public:
  using OpConversionPattern<PrimIfYieldOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(PrimIfYieldOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<scf::YieldOp>(op, adaptor.getOperands());
    return success();
  }
};
} // namespace

namespace {
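// Converts torch.prim.If into scf.if: result types are run through the type
// converter, both regions are inlined into the new scf.if, and the
// torch.prim.If.yield terminators are rewritten by the pattern above. A
// rough sketch of the shape of the conversion (the builtin result types and
// materializations depend on the backend type conversion and are
// illustrative only):
//
//   %r = torch.prim.If %cond -> (!torch.int) {
//     torch.prim.If.yield %a : !torch.int
//   } else {
//     torch.prim.If.yield %b : !torch.int
//   }
//
// becomes, roughly,
//
//   %r = scf.if %cond -> (i64) {
//     scf.yield %a : i64
//   } else {
//     scf.yield %b : i64
//   }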
class ConvertTorchPrimIfOp : public OpConversionPattern<PrimIfOp> {
public:
  using OpConversionPattern<PrimIfOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(PrimIfOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    SmallVector<Type, 1> newResultTypes;
    if (failed(getTypeConverter()->convertTypes(op.getResultTypes(),
                                                newResultTypes)))
      return rewriter.notifyMatchFailure(op,
                                         "could not convert PrimIfOp outputs");
    auto scfIf = rewriter.create<scf::IfOp>(op->getLoc(), newResultTypes,
                                            adaptor.getCondition(),
                                            /*withElseRegion=*/true);
    auto inlineIfCase = [&](Region &srcRegion, Region &dstRegion) {
      rewriter.inlineRegionBefore(srcRegion, dstRegion, dstRegion.begin());
      rewriter.eraseBlock(&dstRegion.back());
    };
    inlineIfCase(op.getThenRegion(), scfIf.getThenRegion());
    inlineIfCase(op.getElseRegion(), scfIf.getElseRegion());
    rewriter.replaceOp(op, scfIf.getResults());
    return success();
  }
};
} // namespace

namespace {
// Converts a while-like Torch::PrimLoopOp into scf::WhileOp.
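// A rough sketch of the shape of the conversion (illustrative only; the
// actual IR also contains materializations between !torch types and the
// corresponding builtin types, and the types below are elided):
//
//   torch.prim.Loop %maxTripCount, %initCond, init(%x) {
//   ^bb0(%iv: !torch.int, %xIter: ...):
//     ...
//     torch.prim.Loop.condition %shouldContinue, iter(%xNext)
//   }
//
// becomes, roughly,
//
//   scf.while (%cond = %initCond, %xIter = %x) : ... {
//     scf.condition(%cond) %xIter : ...
//   } do {
//   ^bb0(%xIter2: ...):
//     ...
//     scf.yield %shouldContinueNext, %xNext : ...
//   }
//
// The induction variable and %maxTripCount are dropped, since a while-like
// loop does not use them.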
class ConvertTorchPrimLoopWhileLikeOp : public OpConversionPattern<PrimLoopOp> {
public:
  using OpConversionPattern<PrimLoopOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(PrimLoopOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Return failure on for-like loops.
    if (op.isForLike())
      return failure();

    const TypeConverter *typeConverter = getTypeConverter();
    SmallVector<Type, 1> newResultTypes;
    if (failed(
            typeConverter->convertTypes(op.getResultTypes(), newResultTypes)))
      return rewriter.notifyMatchFailure(
          op, "could not convert PrimLoopOp outputs");

    // Create the scf.while operation from the operands of torch.prim.Loop.
    // The first operand of the prim loop is `maxTripCount`, which can be
    // omitted in the `scf.while` operation.
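    // For example (hypothetical value names), for
    //   torch.prim.Loop %maxTripCount, %initCond, init(%a, %b) { ... }
    // the scf.while operands assembled below are {%initCond, %a, %b}.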
    Value condition = adaptor.getInitialCondition();
    ValueRange iterArgsInit = adaptor.getIterArgsInit();
    SmallVector<Value> scfWhileOpOperands{condition};
    scfWhileOpOperands.append(iterArgsInit.begin(), iterArgsInit.end());
    auto scfWhileOp = rewriter.create<scf::WhileOp>(
        op->getLoc(), newResultTypes, scfWhileOpOperands);

    // Populate the before region of the scf.while operation. The `before`
    // region has only one block, and the arguments of that block must match
    // the operands of the `scf.while` operation.
    SmallVector<Type> beforeRegionArgTypes;
    SmallVector<Location> beforeRegionArgLocs;
    for (Value value : scfWhileOp->getOperands()) {
      beforeRegionArgTypes.push_back(value.getType());
      beforeRegionArgLocs.push_back(value.getLoc());
    }
    auto *beforeBlock = rewriter.createBlock(
        &scfWhileOp.getBefore(), scfWhileOp.getBefore().begin(),
        beforeRegionArgTypes, beforeRegionArgLocs);

    rewriter.setInsertionPointToEnd(beforeBlock);
    // Fetch the condition passed as the iter argument and pass the rest of
    // the arguments to the after block.
    auto scfConditionOp = rewriter.create<scf::ConditionOp>(
        op.getLoc(), beforeBlock->getArgument(0),
        beforeBlock->getArguments().drop_front());

    // Populate the after region.
    if (!scfWhileOp.getAfter().empty())
      rewriter.eraseBlock(&scfWhileOp.getAfter().back());

    SmallVector<Type> afterRegionArgTypes;
    SmallVector<Location> afterRegionArgLocs;
    for (Value value : scfConditionOp.getArgs()) {
      afterRegionArgTypes.push_back(value.getType());
      afterRegionArgLocs.push_back(value.getLoc());
    }
    auto *afterBlock = rewriter.createBlock(
        &scfWhileOp.getAfter(), scfWhileOp.getAfter().begin(),
        afterRegionArgTypes, afterRegionArgLocs);

    // Rewrite uses of the torch loop block arguments to the new while-loop
    // "after" arguments. The induction variable of the prim loop (its first
    // block argument) is skipped because while-like prim loops do not use
    // the induction variable.
    for (const auto &barg :
         enumerate(op.getRegion().front().getArguments().drop_front())) {
      Value to = afterBlock->getArgument(barg.index());
      Type targetType = to.getType();
      Value torchArg = to;

      // If the target type is a non-torch type, use the TypeConverter to
      // convert the type of the source.
      if (isa<mlir::FloatType>(targetType)) {
        targetType = Torch::FloatType::get(op->getContext());
        torchArg = typeConverter->materializeSourceConversion(
            rewriter, scfWhileOp.getLoc(), targetType, {to});
      } else if (isa<mlir::IntegerType>(targetType)) {
        unsigned bitWidth = targetType.getIntOrFloatBitWidth();
        if (bitWidth == 1)
          targetType = Torch::BoolType::get(op->getContext());
        else
          targetType = Torch::IntType::get(op->getContext());
        torchArg = typeConverter->materializeSourceConversion(
            rewriter, scfWhileOp.getLoc(), targetType, {to});
      }
      if (!torchArg)
        return rewriter.notifyMatchFailure(op,
                                           "unsupported type of the operand");
      barg.value().replaceAllUsesWith(torchArg);
    }
    // Inline the torch loop body operations into the 'after' region.
    PatternRewriter::InsertionGuard guard(rewriter);
    for (auto &operation :
         llvm::make_early_inc_range(op.getRegion().front().getOperations())) {
      if (auto primLoopConditionOp = dyn_cast<PrimLoopConditionOp>(operation)) {
        // Fix up the terminator.
        SmallVector<Value> loopConditionIterArgs;
        Value torchShouldContinue = primLoopConditionOp.getShouldContinue();
        Value shouldContinue = typeConverter->materializeTargetConversion(
            rewriter, scfWhileOp->getLoc(),
            typeConverter->convertType(torchShouldContinue.getType()),
            {torchShouldContinue});
        if (!shouldContinue)
          return rewriter.notifyMatchFailure(op,
                                             "unsupported type of the operand");
        loopConditionIterArgs.push_back(shouldContinue);
        for (auto torchArg : primLoopConditionOp.getIterArgs()) {
          Type torchType = torchArg.getType();

          // If the argument is a torch tensor, add it directly to the list
          // of iter args.
          if (isa<Torch::BaseTensorType>(torchType)) {
            loopConditionIterArgs.push_back(torchArg);
            continue;
          }
          Value arg = typeConverter->materializeTargetConversion(
              rewriter, scfWhileOp->getLoc(),
              typeConverter->convertType(torchArg.getType()), {torchArg});
          if (!arg)
            return rewriter.notifyMatchFailure(
                op, "unsupported type of the operand");
          loopConditionIterArgs.push_back(arg);
        }
        rewriter.create<scf::YieldOp>(scfWhileOp.getLoc(),
                                      loopConditionIterArgs);
      } else {
        operation.moveBefore(afterBlock, afterBlock->end());
      }
    }
    rewriter.replaceOp(op, scfWhileOp->getResults());
    return success();
  }
};
} // namespace

namespace {
// Converts a for-like Torch::PrimLoopOp into scf::ForOp.
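// A rough sketch of the shape of the conversion (illustrative only; the
// index casts and the materializations between !torch types and builtin
// types are omitted, and the types below are elided):
//
//   torch.prim.Loop %n, %true, init(%x) {
//   ^bb0(%iv: !torch.int, %xIter: ...):
//     ...
//     torch.prim.Loop.condition %true, iter(%xNext)
//   }
//
// becomes, roughly,
//
//   scf.for %iv = %c0 to %n step %c1 iter_args(%xIter = %x) -> (...) {
//     ...
//     scf.yield %xNext : ...
//   }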
class ConvertTorchPrimLoopForLikeOp : public OpConversionPattern<PrimLoopOp> {
public:
  using OpConversionPattern<PrimLoopOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(PrimLoopOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {

    // Return failure on while-like loops.
    if (!op.isForLike())
      return failure();

    const TypeConverter *typeConverter = getTypeConverter();
    SmallVector<Type, 1> newResultTypes;
    if (failed(
            typeConverter->convertTypes(op.getResultTypes(), newResultTypes)))
      return rewriter.notifyMatchFailure(
          op, "could not convert PrimLoopOp outputs");

    // Calculate the lower bound, upper bound, and step indices. Currently
    // only lower-bound = 0 and step = 1 are supported.
    Location loc = op.getLoc();
    Value lowerBoundIndex = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    Value stepIndex = rewriter.create<arith::ConstantIndexOp>(loc, 1);
    Value upperBoundIndex = rewriter.create<arith::IndexCastOp>(
        loc, rewriter.getIndexType(), adaptor.getMaxTripCount());
    auto scfForOp =
        rewriter.create<scf::ForOp>(loc, lowerBoundIndex, upperBoundIndex,
                                    stepIndex, adaptor.getIterArgsInit());

    SmallVector<Type> regionArgTypes;
    SmallVector<Location> regionArgLocs;
    for (Value value : scfForOp.getRegion().front().getArguments()) {
      regionArgTypes.push_back(value.getType());
      regionArgLocs.push_back(value.getLoc());
    }

    // Populate the loop body region.
    if (!scfForOp.getRegion().empty())
      rewriter.eraseBlock(&scfForOp.getRegion().back());

    auto *block = rewriter.createBlock(&scfForOp.getRegion(),
                                       scfForOp.getRegion().begin(),
                                       regionArgTypes, regionArgLocs);

    // Rewrite uses of the torch loop block arguments to the new for-loop
    // block arguments.
    for (const auto &barg : enumerate(op.getRegion().front().getArguments())) {
      Value to = block->getArgument(barg.index());
      if (isa<mlir::IndexType>(to.getType()))
        to =
            rewriter.create<arith::IndexCastOp>(loc, rewriter.getI64Type(), to);
      Type targetType = to.getType();
      Value torchArg = to;

      // If the target type is a non-torch type, use the TypeConverter to
      // convert the type of the source.
      if (isa<mlir::FloatType>(targetType)) {
        targetType = Torch::FloatType::get(op->getContext());
        torchArg = typeConverter->materializeSourceConversion(
            rewriter, scfForOp.getLoc(), targetType, {to});
      } else if (isa<mlir::IntegerType>(targetType)) {
        unsigned bitWidth = targetType.getIntOrFloatBitWidth();
        if (bitWidth == 1)
          targetType = Torch::BoolType::get(op->getContext());
        else
          targetType = Torch::IntType::get(op->getContext());
        torchArg = typeConverter->materializeSourceConversion(
            rewriter, scfForOp.getLoc(), targetType, {to});
      } else if (auto tty = dyn_cast<RankedTensorType>(targetType)) {
        targetType =
            op.getIterArgsInit()[barg.index() - scfForOp.getNumInductionVars()]
                .getType();
        torchArg = typeConverter->materializeSourceConversion(
            rewriter, scfForOp.getLoc(), targetType, {to});
      }

      if (!torchArg)
        return rewriter.notifyMatchFailure(op,
                                           "unsupported type of the operand");
      barg.value().replaceAllUsesWith(torchArg);
    }

    // Inline the torch loop body operations into the scf.for body block.
    PatternRewriter::InsertionGuard guard(rewriter);
    for (auto &operation :
         llvm::make_early_inc_range(op.getRegion().front().getOperations())) {
      if (auto primLoopConditionOp = dyn_cast<PrimLoopConditionOp>(operation)) {
        // Fix up the terminator.
        SmallVector<Value> loopConditionIterArgs;
        for (auto torchArg : primLoopConditionOp.getIterArgs()) {
          Value arg = typeConverter->materializeTargetConversion(
              rewriter, scfForOp.getLoc(),
              typeConverter->convertType(torchArg.getType()), {torchArg});
          if (!arg)
            return rewriter.notifyMatchFailure(
                op, "unsupported type of the operand");
          loopConditionIterArgs.push_back(arg);
        }
        rewriter.create<scf::YieldOp>(scfForOp.getLoc(), loopConditionIterArgs);
      } else {
        operation.moveBefore(block, block->end());
      }
    }

    rewriter.replaceOp(op, scfForOp->getResults());
    return success();
  }
};
} // namespace

namespace {
class ConvertTorchToSCF : public ConvertTorchToSCFBase<ConvertTorchToSCF> {
public:
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<scf::SCFDialect, arith::ArithDialect>();
    TorchConversion::getBackendTypeConversionDependentDialects(registry);
  }

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    ConversionTarget target(*context);
    target.addLegalDialect<Torch::TorchDialect, scf::SCFDialect,
                           arith::ArithDialect>();

    TypeConverter typeConverter;
    typeConverter.addConversion([](Type type) { return type; });
    TorchConversion::setupBackendTypeConversion(target, typeConverter);

    RewritePatternSet patterns(context);
    target.addIllegalOp<PrimIfOp>();
    patterns.add<ConvertTorchPrimIfOp>(typeConverter, context);
    target.addIllegalOp<PrimIfYieldOp>();
    patterns.add<ConvertTorchPrimIfYieldOp>(typeConverter, context);
    target.addIllegalOp<PrimLoopOp>();
    patterns.add<ConvertTorchPrimLoopWhileLikeOp>(typeConverter, context);
    patterns.add<ConvertTorchPrimLoopForLikeOp>(typeConverter, context);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      return signalPassFailure();
  }
};
} // namespace
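// Factory for the pass. Assuming the usual pass registration in this
// project, the conversion can typically be exercised in isolation with an
// invocation along the lines of (illustrative, not verified here):
//
//   torch-mlir-opt --convert-torch-to-scf input.mlir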
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::torch::createConvertTorchToSCFPass() {
  return std::make_unique<ConvertTorchToSCF>();
}