//===- LowerToBackendContract.cpp --------------------------------*- C++-*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"

#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "torch-mlir/Dialect/Torch/Transforms/Passes.h"
#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "torch-lower-to-backend-contract"

using namespace mlir;
using namespace mlir::torch;
using namespace mlir::torch::Torch;

//===----------------------------------------------------------------------===//
// Checking the backend contract.
//===----------------------------------------------------------------------===//

static void
markDecomposedOpsAsIllegal(MLIRContext *context, ConversionTarget &target,
                           const llvm::StringSet<> &backendLegalOpsSet);
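
// Returns success if `type` is in the allowed set of types for the backend
// contract, recursing into container types. If `actuallyEmitDiagnostics` is
// true, also emits diagnostics on `op` explaining why the type is rejected.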
static LogicalResult checkType(Operation *op, Type type,
                               bool actuallyEmitDiagnostics) {
  // Allow various scalar types that backends are expected to be able to
  // handle.
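  // (These print in the IR as `!torch.int`, `!torch.float`, `!torch.bool`,
  // and `!torch.Device`.)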
  if (type.isa<Torch::IntType, Torch::FloatType, Torch::BoolType,
               Torch::DeviceType>())
    return success();

  // Backends are not expected to support dynamic computations on these types,
  // but they frequently appear as parameters to ops which backends
  // can statically pattern match and eliminate from the program.
  // For example, a tensor operand might be optional, and the backend
  // will pattern-match statically whether it is passed as a tensor or None.
  if (type.isa<Torch::NoneType, Torch::StringType>())
    return success();

  // We blanket prohibit non-value-semantic tensors.
  // All of our backends are currently based on value-semantic tensors, so
  // we consider it our responsibility to lower all non-value-semantic tensors
  // to value-semantic tensors.
  if (type.isa<NonValueTensorType>()) {
    if (actuallyEmitDiagnostics) {
      return op
          ->emitError("unsupported by backend contract: non-value tensor type")
          .attachNote()
          .append("this is likely due to a missing case in the "
                  "MaximizeValueSemantics pass");
    } else {
      return failure();
    }
  }

  // For value-semantic tensors, we require at least a known rank and dtype.
  // We are not aware of a situation where our backends can handle an unranked
  // tensor type or a tensor with a dynamic dtype.
  //
  // There are somewhat fundamental reasons for this. In particular, the
  // problem of unranked codegen is completely different from the problem of
  // ranked codegen (since ranked corresponds to a fixed loop nest structure).
  // For all codegen systems we are aware of, the program must be reduced to
  // operate on ranked tensors at some point in compilation, and we are not
  // aware of any backend with a general solution to this problem before it
  // reaches codegen. So we consider it our responsibility to eliminate
  // unranked tensors from the program.
  //
  // We aren't aware of any backend with any infrastructure to represent
  // dynamic dtypes, let alone transform and optimize them. Additionally, it
  // is unlikely that any backend, even if it supports dynamic dtypes in some
  // form, will have a sufficiently rich system for representing PyTorch type
  // promotion rules. So we consider it our responsibility to ensure that all
  // dtypes are statically known.
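  //
  // For example, `!torch.vtensor<[2,3],f32>` has a known rank and dtype and
  // satisfies the contract, while a value tensor type with unknown sizes or
  // an unknown dtype does not.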
  if (auto tensorType = type.dyn_cast<ValueTensorType>()) {
    if (!tensorType.hasSizes()) {
      if (actuallyEmitDiagnostics) {
        return op
            ->emitError(
                "unsupported by backend contract: tensor with unknown rank")
            .attachNote()
            .append("this is likely due to a missing transfer function "
                    "in abstract_interp_lib_gen.py");
      } else {
        return failure();
      }
    }
    if (!tensorType.hasDtype()) {
      if (actuallyEmitDiagnostics) {
        return op
            ->emitError(
                "unsupported by backend contract: tensor with unknown dtype")
            .attachNote()
            .append("this is likely due to a missing transfer function in "
                    "abstract_interp_lib_gen.py");
      } else {
        return failure();
      }
    }
    return success();
  }

  // Optional types are also in the category of types which we don't expect
  // backends to dynamically compute with, but they can be pattern matched
  // in many cases that are practically necessary.
  if (auto optionalType = type.dyn_cast<OptionalType>()) {
    // TODO: Be stricter about tensor types.
    // See comment below for ListType.
    if (optionalType.getContainedType().isa<ValueTensorType>())
      return success();
    return checkType(op, optionalType.getContainedType(),
                     actuallyEmitDiagnostics);
  }
  // List types are also in the category of types which we don't expect
  // backends to dynamically compute with, but they can be pattern matched
  // in many cases that are practically necessary. For example, the
  // strides of a convolution op are represented as a list.
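  // (Concretely, a `!torch.list<int>` built by `torch.prim.ListConstruct`.)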
  if (auto listType = type.dyn_cast<ListType>()) {
    // TODO: Be stricter about tensor types.
    // For the moment, there are cases (such as for torch.cat) where we end
    // up with `!torch.list<vtensor>` which doesn't have shape or dtype in
    // the contained type information. Somehow this slips through and works.
    // We should be stricter about this and properly infer the contained type
    // and shape.
    if (listType.getContainedType().isa<ValueTensorType>())
      return success();
    return checkType(op, listType.getContainedType(), actuallyEmitDiagnostics);
  }
  // Tuple types are also in the category of types which we don't expect
  // backends to dynamically compute with, but they can be pattern matched
  // in many cases that are practically necessary.
  if (auto tupleType = type.dyn_cast<Torch::TupleType>()) {
    for (auto containedType : tupleType.getContainedTypes()) {
      if (failed(checkType(op, containedType, actuallyEmitDiagnostics)))
        return failure();
    }
    return success();
  }

  // Unsupported type.
  if (actuallyEmitDiagnostics) {
    return op->emitError("unsupported by backend contract: type ") << type;
  } else {
    return failure();
  }
}
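
// Returns success if `op` is legal according to the conversion `target`.
// When `actuallyEmitDiagnostics` is true, also emits an error on the op,
// noting DecomposeComplexOps as the most likely missing decomposition.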
static LogicalResult checkOpIsBackendLegal(Operation *op,
                                           const ConversionTarget &target,
                                           bool actuallyEmitDiagnostics) {
  if (target.isLegal(op))
    return success();

  if (actuallyEmitDiagnostics) {
    return op->emitError("found an op that was marked as backend illegal")
        .attachNote()
        .append("this is likely due to DecomposeComplexOps being unable to "
                "decompose this op");
  } else {
    return failure();
  }
}
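
// Returns true if `module` satisfies the backend contract: no module
// initializers, no unimplemented `torch.operator` ops, and every op legal
// per `target` with every value's type passing `checkType`.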
static bool satisfiesBackendContract(ModuleOp module,
                                     const ConversionTarget &target,
                                     bool actuallyEmitDiagnostics = false) {
  // We do not permit `torch.global_slot`'s in the backend contract, since
  // support for them is not widespread, and this does not align with PyTorch's
  // more tracing-based direction.
  //
  // We just check for the GlobalSlotModuleInitializerOp since its verifier
  // ensures that the set of global slots matches those initialized by the
  // module initializer.
  auto walkResult0 = module.walk([&](Torch::GlobalSlotModuleInitializerOp op) {
    if (actuallyEmitDiagnostics) {
      // Report the error on the terminator to avoid dumping the whole
      // initializer itself, which can have pages of ops in it.
      op.getBody()
          ->getTerminator()
          ->emitError("unsupported by backend contract: module initializers")
          .attachNote()
          .append("this is likely due to InlineGlobalSlots being unable to "
                  "inline a global slot");
    }
    return WalkResult::interrupt();
  });
  if (walkResult0.wasInterrupted())
    return false;

  // Check for unimplemented operators first to give more direct diagnostics.
  walkResult0 = module.walk([&](Torch::OperatorOp op) {
    if (llvm::all_of(op.getResults(), [&op](auto res) {
          return succeeded(checkType(op.getOperation(), res.getType(),
                                     /*actuallyEmitDiagnostics=*/false));
        })) {
      return WalkResult::advance();
    }

    if (actuallyEmitDiagnostics) {
      op->emitError(
          "unsupported by backend contract: Unimplemented operator '" +
          op.getName() + "'");
    }
    return WalkResult::interrupt();
  });
  if (walkResult0.wasInterrupted())
    return false;

  // Check all the types of all Value's in the program and the legality of all
  // the ops.
  //
  // A pre-order walk gives a more intuitive "first error".
  // TODO: Should we report more than the first error?
  // How do we avoid making it too spammy?
  auto walkResult1 = module.walk<WalkOrder::PreOrder>([&](Block *block) {
    for (BlockArgument arg : block->getArguments())
      if (failed(checkType(block->getParentOp(), arg.getType(),
                           actuallyEmitDiagnostics))) {
        return WalkResult::interrupt();
      }
    for (Operation &op : *block) {
      if (failed(checkOpIsBackendLegal(&op, target, actuallyEmitDiagnostics)))
        return WalkResult::interrupt();

      for (OpResult result : op.getResults())
        if (failed(checkType(&op, result.getType(), actuallyEmitDiagnostics)))
          return WalkResult::interrupt();
    }
    return WalkResult::advance();
  });
  if (walkResult1.wasInterrupted())
    return false;
  return true;
}

// Explicitly set ops and dialects allowed and not allowed in backend contract.
static ConversionTarget
getBackendContractTarget(MLIRContext *context, bool decompose,
                         const llvm::StringSet<> &backendLegalOpsSet) {
  ConversionTarget target(*context);
  target.addLegalDialect<func::FuncDialect, Torch::TorchDialect>();
  if (decompose)
    markDecomposedOpsAsIllegal(context, target, backendLegalOpsSet);
  return target;
}

namespace {
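// Lowers a module to the backend contract by repeatedly running the Torch
// simplification pipeline until the contract is satisfied (or `maxIterations`
// is exhausted).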
class LowerToBackendContractPass
    : public LowerToBackendContractBase<LowerToBackendContractPass> {
public:
  LowerToBackendContractPass() = default;
  LowerToBackendContractPass(int maxIterations, bool decompose,
                             ArrayRef<std::string> backendLegalOps,
                             StringRef extraLibrary) {
    this->maxIterations = maxIterations;
    this->decompose = decompose;
    this->backendLegalOps = backendLegalOps;
    this->extraLibrary = extraLibrary.str();
  }
  void runOnOperation() override {
    ModuleOp module = getOperation();
    MLIRContext *context = &getContext();

    backendLegalOpsSet.clear();
    backendLegalOpsSet.insert(backendLegalOps.begin(), backendLegalOps.end());
    ConversionTarget target =
        getBackendContractTarget(context, decompose, backendLegalOpsSet);

    OpPassManager pm(module.getOperationName());
    TorchLoweringPipelineOptions options;
    options.decompose = decompose;
    options.backendLegalOps = backendLegalOps;
    options.extraLibrary = extraLibrary;
    createTorchSimplificationPipeline(pm, options);

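    // Run the simplification pipeline up to a fixed point: each iteration can
    // unlock further simplifications once more shapes and dtypes have become
    // static.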
    int i = 0;
    do {
      if (i++ == maxIterations) {
        LLVM_DEBUG({
          llvm::dbgs() << "LowerToBackendContractPass: "
                       << "failed to satisfy backend contract after "
                       << maxIterations
                       << " iterations of the simplification pipeline\n";
        });
        // Show the diagnostics.
        (void)satisfiesBackendContract(module, target,
                                       /*actuallyEmitDiagnostics=*/true);
        return signalPassFailure();
      }

      if (failed(runPipeline(pm, module)))
        return signalPassFailure();
    } while (!satisfiesBackendContract(module, target));
    LLVM_DEBUG({
      llvm::dbgs() << "LowerToBackendContractPass: "
                   << "succeeded after " << i
                   << " iterations of the simplification pipeline\n";
    });
  }

private:
  llvm::StringSet<> backendLegalOpsSet;
};

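// Verifies that a module already satisfies the backend contract without
// performing any decompositions, emitting diagnostics on failure.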
class VerifyBackendContractNoDecompositionsPass
    : public VerifyBackendContractNoDecompositionsBase<
          VerifyBackendContractNoDecompositionsPass> {
public:
  VerifyBackendContractNoDecompositionsPass() = default;

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    ConversionTarget target =
        getBackendContractTarget(context, /*decompose=*/false,
                                 /*backendLegalOpsSet=*/{});

    if (!satisfiesBackendContract(getOperation(), target,
                                  /*actuallyEmitDiagnostics=*/true)) {
      return signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<OperationPass<ModuleOp>>
mlir::torch::Torch::createLowerToBackendContractPass(
    int maxIterations, bool decompose, ArrayRef<std::string> backendLegalOps,
    StringRef extraLibrary) {
  return std::make_unique<LowerToBackendContractPass>(
      maxIterations, decompose, backendLegalOps, extraLibrary);
}

std::unique_ptr<OperationPass<ModuleOp>>
mlir::torch::Torch::createVerifyBackendContractNoDecompositionsPass() {
  return std::make_unique<VerifyBackendContractNoDecompositionsPass>();
}

// The backend contract guarantees that ops with decompositions available will
// be decomposed. The only way to have an op reach the backend contract without
// getting decomposed is by having the user explicitly specify that op in the
// `backendLegalOpsSet` argument to the `LowerToBackendContractPass`.
// Therefore, here we mark as illegal all ops with decompositions except for
// those in `backendLegalOpsSet`.
//
// The legality check takes place here instead of in the `DecomposeComplexOps`
// pass for two reasons:
// 1. It makes sure the `DecomposeComplexOps` pass always succeeds, allowing
// it to run multiple times. This is needed for graphs where static
// information such as dtypes and shapes takes multiple iterations to
// propagate through the entire graph. A failing `DecomposeComplexOps` pass
// would cause the entire `LowerToBackendContractPass` to fail.
// 2. It makes the legality requirements in the backend contract for ops with
// decompositions explicit in this file.
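//
// For example, passing "aten.softmax.int" in `backendLegalOps` keeps
// `torch.aten.softmax.int` in the IR all the way to the backend contract
// instead of decomposing it.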
static void
markDecomposedOpsAsIllegal(MLIRContext *context, ConversionTarget &target,
                           const llvm::StringSet<> &backendLegalOpsSet) {
  target.addIllegalOp<AtenSoftmaxIntOp>();
  target.addIllegalOp<Aten_SoftmaxOp>();
  target.addIllegalOp<Aten_LogSoftmaxOp>();
  target.addIllegalOp<AtenLogSoftmaxIntOp>();
  target.addIllegalOp<AtenEmptyLikeOp>();
  target.addIllegalOp<AtenOnesLikeOp>();
  target.addIllegalOp<AtenZerosLikeOp>();
  target.addIllegalOp<AtenStackOp>();
  target.addIllegalOp<AtenRollOp>();
  target.addIllegalOp<AtenRepeatOp>();
  target.addIllegalOp<AtenExpandOp>();
  target.addIllegalOp<AtenFlattenUsingIntsOp>();
  target.addIllegalOp<AtenWhereScalarOp>();
  target.addIllegalOp<AtenWhereScalarOtherOp>();
  target.addIllegalOp<AtenWhereScalarSelfOp>();
  target.addIllegalOp<AtenMaskedFillScalarOp>();
  target.addIllegalOp<AtenSizeOp>();
  target.addIllegalOp<AtenReshapeOp>();
  target.addIllegalOp<Aten_SoftmaxBackwardDataOp>();
  target.addIllegalOp<AtenTanhBackwardOp>();
  target.addIllegalOp<AtenAddmmOp>();
  target.addIllegalOp<AtenMeanOp>();
  target.addIllegalOp<AtenMeanDimOp>();
  target.addIllegalOp<AtenNormScalarOptDimOp>();
  target.addIllegalOp<AtenSelectIntOp>();
  target.addIllegalOp<AtenMvOp>();
  target.addIllegalOp<AtenTOp>();
  target.addIllegalOp<Aten_LogSoftmaxBackwardDataOp>();
  target.addDynamicallyLegalOp<AtenMatmulOp>([](AtenMatmulOp op) {
    std::optional<unsigned> lhsRank = getTensorRank(op.getSelf());
    std::optional<unsigned> rhsRank = getTensorRank(op.getOther());
    if (!lhsRank || !rhsRank)
      return false;
    // aten.matmul is legal only when it is not the 2D x 2D or 3D x 3D case,
    // since those cases are decomposed into aten.mm and aten.bmm.
    return (*lhsRank != 2 || *rhsRank != 2) && (*lhsRank != 3 || *rhsRank != 3);
  });
  target.addIllegalOp<AtenAddcmulOp>();
  target.addIllegalOp<AtenAddcdivOp>();
  target.addIllegalOp<AtenLayerNormOp>();
  target.addIllegalOp<AtenNativeLayerNormOp>();
  target.addIllegalOp<AtenNativeBatchNormOp>();
  target.addIllegalOp<Aten_ConvolutionOp, Aten_ConvolutionDeprecatedOp>();
  target.addIllegalOp<AtenConvolutionBackwardOp>();
  target.addIllegalOp<AtenConv2dOp>();
  target.addIllegalOp<AtenConvTranspose2dInputOp>();
  target.addIllegalOp<AtenArangeOp>();
  target.addIllegalOp<AtenArangeStartOp>();
  target.addIllegalOp<AtenArgmaxOp>();
  target.addIllegalOp<AtenSquareOp>();
  target.addIllegalOp<AtenVarOp>();
  target.addIllegalOp<AtenStdOp>();
  target.addIllegalOp<Aten_UnsafeViewOp>();
  target.addIllegalOp<Aten_ReshapeAliasOp>();
  target.addIllegalOp<AtenBernoulliOp>();
  target.addIllegalOp<ValsemVariantAtenBernoulliFloatOp>();
  target.addIllegalOp<AtenBernoulliPOp>();
  target.addIllegalOp<AtenBernoulliTensorOp>();
  target.addIllegalOp<AtenZeroOp>();
  target.addIllegalOp<AtenIsnanOp>();
  target.addIllegalOp<AtenRandLikeOp>();
  target.addIllegalOp<AtenHardsigmoidOp>();
  target.addIllegalOp<AtenRelu6Op>();
  target.addIllegalOp<AtenEluOp>();
  target.addIllegalOp<AtenHardswishOp>();
  target.addIllegalOp<AtenSoftplusOp>();
  target.addIllegalOp<AtenSiluOp>();
  target.addIllegalOp<AtenNewZerosOp>();
  target.addIllegalOp<AtenNewOnesOp>();
  target.addIllegalOp<AtenHardtanhOp>();
  target.addIllegalOp<AtenFullOp>();
  target.addIllegalOp<AtenLinearOp>();
  target.addIllegalOp<AtenMishOp>();
  target.addIllegalOp<AtenFullLikeOp>();
  target.addIllegalOp<AtenNewFullOp>();
  target.addIllegalOp<AtenIndexPutOp>();
  target.addIllegalOp<AtenExpandAsOp>();
  target.addIllegalOp<Aten_ToCopyOp>();
  target.addIllegalOp<AtenDropoutOp>();
  target.addIllegalOp<AtenNativeDropoutOp>();
  target.addIllegalOp<AtenNewEmptyOp>();
  target.addIllegalOp<AtenIndexPutHackedTwinOp>();
  target.addIllegalOp<Aten_UnsafeIndexPutHackedTwinOp>();
  target.addIllegalOp<AtenPadOp>();
  target.addIllegalOp<AtenToDtypeLayoutOp>();
  target.addIllegalOp<AtenToDeviceOp>();
  target.addIllegalOp<AtenAdaptiveAvgPool1dOp>();
  target.addIllegalOp<AtenAdaptiveAvgPool2dOp>();
  target.addIllegalOp<AtenClampMinOp>();
  target.addIllegalOp<AtenClampMaxOp>();
  target.addIllegalOp<AtenBaddbmmOp>();
  target.addIllegalOp<AtenFloorDivideOp>();
  target.addIllegalOp<AtenNumpyTOp>();
  target.addIllegalOp<AtenSelectScatterOp>();
  target.addIllegalOp<AtenVarDimOp>();
  target.addIllegalOp<AtenAmaxOp>();
  target.addIllegalOp<AtenVarCorrectionOp>();
  target.addIllegalOp<AtenStdDimOp>();
  target.addIllegalOp<AtenStdCorrectionOp>();
  target.addIllegalOp<AtenNarrowOp>();
  target.addIllegalOp<AtenNarrowTensorOp>();
  target.addIllegalOp<Aten_EmbeddingBagOp>();
  target.addIllegalOp<AtenLiftFreshCopyOp>();
  target.addIllegalOp<AtenIndexTensorOp>();
  target.addIllegalOp<AtenMseLossOp>();
  target.addIllegalOp<AtenRandintLowOp>();
  target.addIllegalOp<AtenRandintOp>();
  target.addIllegalOp<AtenVarMeanCorrectionOp>();
  target.addIllegalOp<PrimsConvertElementTypeOp>();
  target.addIllegalOp<PrimsVarOp>();
  target.addIllegalOp<PrimsSqrtOp>();
  target.addIllegalOp<AtenRandOp>();
  target.addIllegalOp<AtenRandnOp>();
  target.addIllegalOp<AtenRandnGeneratorOp>();
  target.addIllegalOp<AtenRandnLikeOp>();
  target.addIllegalOp<AtenVarMeanOp>();
  target.addIllegalOp<AtenNewEmptyStridedOp>();
  target.addIllegalOp<AtenEmptyStridedOp>();
  target.addIllegalOp<AtenBucketizeTensorOp>();
  target.addIllegalOp<PrimsSqueezeOp>();
  target.addIllegalOp<AtenMovedimIntOp>();
  target.addIllegalOp<AtenOneHotOp>();
  target.addIllegalOp<AtenCrossEntropyLossOp>();
  target.addIllegalOp<AtenVarMeanDimOp>();
  target.addIllegalOp<AtenTopkOp>();
  target.addIllegalOp<AtenScalarTensorOp>();
  target.addIllegalOp<AtenScatterValueOp>();
  target.addIllegalOp<AtenTypeAsOp>();
  target.addIllegalOp<AtenTileOp>();
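  // Ops the user explicitly marked as backend-legal are exempted from the
  // illegality rules above. `kTorchOpPrefix` is prepended to each entry to
  // form the fully qualified operation name (e.g. `torch.aten.softmax.int`).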
  for (auto &opName : backendLegalOpsSet) {
    target.addLegalOp(
        OperationName(kTorchOpPrefix + opName.first().str(), context));
  }
  target.addDynamicallyLegalOp<OperatorOp>(
      [backendLegalOpsSet](OperatorOp opOp) {
        auto opName = opOp->getAttr("name").cast<StringAttr>().getValue();
        return backendLegalOpsSet.contains(opName);
      });
}